Dataset schema (the columns repeat in this order for every row dumped below):
    code                     string  (86 to 54.5k chars)
    code_codestyle           int64   (0 to 371)
    style_context            string  (87 to 49.2k chars)
    style_context_codestyle  int64   (0 to 349)
    label                    int64   (0 to 1)
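A minimal sketch of how a dataset with this schema could be inspected with the `datasets` library; the dataset id "user/code-style-pairs" is a placeholder assumption, not the actual source repository:

# Hypothetical loading sketch; swap in the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:200])           # first code snippet (string, 86 to 54.5k chars)
print(row["code_codestyle"])       # int64 style id, 0 to 371
print(row["style_context"][:200])  # paired context snippet
print(row["label"])                # binary label, 0 or 1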
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE =[ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: lowercase_ : str = k.replace(_A , _A ) if k.startswith('encoder' ): lowercase_ : Tuple = k.replace('.attn' , '.self_attn' ) lowercase_ : Any = k.replace('norm1' , 'self_attn_layer_norm' ) lowercase_ : Dict = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): lowercase_ : Dict = k.replace('norm1' , 'self_attn_layer_norm' ) lowercase_ : str = k.replace('norm2' , 'encoder_attn_layer_norm' ) lowercase_ : List[str] = k.replace('norm3' , 'final_layer_norm' ) return k def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : List[Any] = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: lowercase_ : List[str] = sd.pop(_A ) lowercase_ : List[str] = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd lowercase_ : Tuple = v __SCREAMING_SNAKE_CASE =['START'] @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : Any = torch.load(_A , map_location='cpu' ) lowercase_ : Dict = model['model'] lowercase_ : Any = BlenderbotConfig.from_json_file(_A ) lowercase_ : Tuple = BlenderbotForConditionalGeneration(_A ) lowercase_ : List[Any] = m.model.state_dict().keys() lowercase_ : List[Any] = [] lowercase_ : Union[str, Any] = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue lowercase_ : Optional[Any] = rename_state_dict_key(_A ) if new_k not in valid_keys: failures.append([k, new_k] ) else: lowercase_ : List[Any] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(_A ) m.model.load_state_dict(_A , strict=_A ) m.half() m.save_pretrained(_A ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin") parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.") parser.add_argument( "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use" ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
code_codestyle: 355
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
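A usage sketch for the DiT converter above; the checkpoint URL is the script's own default, while the dump folder name is an illustrative placeholder:

# Converts the base DiT checkpoint and verifies logits on a sample image.
convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="dit-base",
    push_to_hub=False,
)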
style_context_codestyle: 321
label: 0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : dict ): lowercase_ : List[Any] = set() # edges = list of graph's edges lowercase_ : Any = get_edges(__SCREAMING_SNAKE_CASE ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: lowercase_ : List[Any] = edges.pop() chosen_vertices.add(__SCREAMING_SNAKE_CASE ) chosen_vertices.add(__SCREAMING_SNAKE_CASE ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(__SCREAMING_SNAKE_CASE ) return chosen_vertices def lowercase__( __SCREAMING_SNAKE_CASE : dict ): lowercase_ : Optional[int] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
code_codestyle: 356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 321
label: 0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE ={ "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
style_context_codestyle: 321
label: 0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = data lowercase_ : List[str] = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0] @staticmethod def _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = B'\x80' + B'\x00' * (63 - (len(self.data ) + 8) % 64) lowercase_ : Dict = self.data + padding + struct.pack('>Q' ,8 * len(self.data ) ) return padded_data def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0 ,len(self.padded_data ) ,64 ) ] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = list(struct.unpack('>16L' ,lowercase__ ) ) + [0] * 64 for i in range(16 ,80 ): lowercase_ : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) ,1 ) return w def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Any = self.padding() lowercase_ : List[str] = self.split_blocks() for block in self.blocks: lowercase_ : Optional[Any] = self.expand_block(lowercase__ ) lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.h for i in range(0 ,80 ): if 0 <= i < 20: lowercase_ : List[str] = (b & c) | ((~b) & d) lowercase_ : Dict = 0X5_A_8_2_7_9_9_9 elif 20 <= i < 40: lowercase_ : Tuple = b ^ c ^ d lowercase_ : List[Any] = 0X6_E_D_9_E_B_A_1 elif 40 <= i < 60: lowercase_ : List[Any] = (b & c) | (b & d) | (c & d) lowercase_ : List[str] = 0X8_F_1_B_B_C_D_C elif 60 <= i < 80: lowercase_ : int = b ^ c ^ d lowercase_ : Union[str, Any] = 0XC_A_6_2_C_1_D_6 lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = ( self.rotate(lowercase__ ,5 ) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F, a, self.rotate(lowercase__ ,30 ), c, d, ) lowercase_ : str = ( self.h[0] + a & 0XF_F_F_F_F_F_F_F, self.h[1] + b & 0XF_F_F_F_F_F_F_F, self.h[2] + c & 0XF_F_F_F_F_F_F_F, self.h[3] + d & 0XF_F_F_F_F_F_F_F, self.h[4] + e & 0XF_F_F_F_F_F_F_F, ) return ("{:08x}" * 5).format(*self.h ) def lowercase__( ): lowercase_ : Dict = b'Test String' assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324 def lowercase__( ): lowercase_ : Any = argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' ) lowercase_ : str = parser.parse_args() lowercase_ : Any = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: lowercase_ : List[Any] = f.read() else: lowercase_ : int = bytes(A__ , 'utf-8' ) print(SHAaHash(A__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
code_codestyle: 358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
style_context_codestyle: 321
label: 0
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: lowercase_ : List[str] = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) lowercase_ : List[Any] = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) lowercase_ : Dict = in_proj_weight[ : encoder_config.hidden_size, : ] lowercase_ : Dict = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] lowercase_ : str = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: lowercase_ : Optional[int] = dct.pop(_UpperCAmelCase ) lowercase_ : int = val def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: if "handwritten" in checkpoint_url: lowercase_ : Tuple = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let 
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: lowercase_ : List[str] = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' lowercase_ : List[Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> List[str]: lowercase_ : List[Any] = ViTConfig(image_size=3_84 , qkv_bias=_UpperCAmelCase ) lowercase_ : List[Any] = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: lowercase_ : List[Any] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder lowercase_ : int = 10_24 lowercase_ : Union[str, Any] = 40_96 lowercase_ : Union[str, Any] = 24 lowercase_ : int = 16 lowercase_ : List[Any] = 10_24 else: raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: lowercase_ : Optional[Any] = False lowercase_ : List[str] = 'relu' lowercase_ : List[Any] = 10_24 lowercase_ : Any = True lowercase_ : List[Any] = False lowercase_ : Any = False # load HuggingFace model lowercase_ : Tuple = ViTModel(_UpperCAmelCase , add_pooling_layer=_UpperCAmelCase ) lowercase_ : int = TrOCRForCausalLM(_UpperCAmelCase ) lowercase_ : Union[str, Any] = VisionEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase ) model.eval() # load state_dict of original model, rename some keys lowercase_ : Union[str, Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' , check_hash=_UpperCAmelCase )['model'] lowercase_ : List[str] = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): lowercase_ : List[Any] = state_dict.pop(_UpperCAmelCase ) if key.startswith('decoder' ) and "output_projection" not in key: lowercase_ : Any = val else: lowercase_ : Union[str, Any] = val # load state dict model.load_state_dict(_UpperCAmelCase ) # Check outputs on an image lowercase_ : Union[str, Any] = ViTImageProcessor(size=encoder_config.image_size ) lowercase_ : Optional[int] = RobertaTokenizer.from_pretrained('roberta-large' ) lowercase_ : Any = TrOCRProcessor(_UpperCAmelCase , _UpperCAmelCase ) lowercase_ : Optional[Any] = processor(images=prepare_img(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values # verify logits lowercase_ : int = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) lowercase_ : Dict = model(pixel_values=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ) lowercase_ : int = outputs.logits lowercase_ : Optional[Any] = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: lowercase_ : List[str] = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: lowercase_ : Union[str, 
Any] = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: lowercase_ : Tuple = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: lowercase_ : Any = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _UpperCAmelCase , atol=1E-3 ), "First elements of logits not as expected" Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCAmelCase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
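A usage sketch for the TrOCR converter above; the checkpoint URL is the script's own default, and the dump folder name is an illustrative placeholder:

# Converts the base handwritten TrOCR checkpoint and saves model + processor.
convert_tr_ocr_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
    pytorch_dump_folder_path="trocr-base-handwritten",
)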
code_codestyle: 359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
style_context_codestyle: 321
label: 0
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __SCREAMING_SNAKE_CASE ={ "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ): if got_ver is None or want_ver is None: raise ValueError( F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider''' F''' reinstalling {pkg}.''' ) if not ops[op](version.parse(a__ ) , version.parse(a__ ) ): raise ImportError( F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ): lowercase_ : Dict = F'''\n{hint}''' if hint is not None else '' # non-versioned check if re.match(R'^[\w_\-\d]+$' , a__ ): lowercase_ , lowercase_ , lowercase_ : Any = requirement, None, None else: lowercase_ : Optional[int] = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , a__ ) if not match: raise ValueError( 'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but' F''' got {requirement}''' ) lowercase_ , lowercase_ : Union[str, Any] = match[0] lowercase_ : List[str] = want_full.split(',' ) # there could be multiple requirements lowercase_ : List[Any] = {} for w in want_range: lowercase_ : str = re.findall(R'^([\s!=<>]{1,2})(.+)' , a__ ) if not match: raise ValueError( 'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,' F''' but got {requirement}''' ) lowercase_ , lowercase_ : Optional[int] = match[0] lowercase_ : str = want_ver if op not in ops: raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' ) # special case if pkg == "python": lowercase_ : Tuple = '.'.join([str(a__ ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(a__ , a__ , a__ , a__ , a__ , a__ ) return # check if any version is installed try: lowercase_ : List[Any] = importlib.metadata.version(a__ ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(a__ , a__ , a__ , a__ , a__ , a__ ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : Union[str, Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main' return require_version(a__ , a__ )
code_codestyle: 360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
style_context_codestyle: 321
label: 0
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=30 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=0.6 ,__UpperCamelCase=None ,) -> str: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : List[str] = batch_size lowercase_ : Tuple = image_size lowercase_ : Optional[Any] = patch_size lowercase_ : Union[str, Any] = num_channels lowercase_ : str = is_training lowercase_ : str = use_labels lowercase_ : Tuple = hidden_size lowercase_ : Tuple = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : str = intermediate_size lowercase_ : List[str] = hidden_act lowercase_ : str = hidden_dropout_prob lowercase_ : Optional[Any] = attention_probs_dropout_prob lowercase_ : Optional[Any] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : Any = mask_ratio lowercase_ : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase_ : int = (image_size // patch_size) ** 2 lowercase_ : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : List[str] = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase__ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Tuple = ViTMAEModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase_ : Any = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, 
self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = ViTMAEForPreTraining(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase_ : List[str] = model(UpperCAmelCase__ ) lowercase_ : str = (self.image_size // self.patch_size) ** 2 lowercase_ : Optional[int] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase_ : Optional[Any] = 1 lowercase_ : Optional[int] = ViTMAEForPreTraining(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Any = model(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs lowercase_ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( __lowercase , __lowercase , unittest.TestCase ): lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () lowercase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[Any] = ViTMAEModelTester(self ) lowercase_ : Union[str, Any] = ConfigTester(self ,config_class=UpperCAmelCase__ ,has_text_modality=UpperCAmelCase__ ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Union[str, Any] = model_class(UpperCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ ,nn.Linear ) ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Any = model_class(UpperCAmelCase__ ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] ,UpperCAmelCase__ ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) 
-> Optional[int]: '''simple docstring''' np.random.seed(2 ) lowercase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowercase_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase_ : str = torch.from_numpy(UpperCAmelCase__ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase_ : Optional[int] = pt_noise super().check_pt_tf_models(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Union[str, Any] = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(UpperCAmelCase__ ,UpperCAmelCase__ ) ) lowercase_ : str = outputs[0].cpu().numpy() lowercase_ : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase__ ) lowercase_ : List[Any] = model_class.from_pretrained(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowercase_ : List[Any] = model(**self._prepare_for_class(UpperCAmelCase__ ,UpperCAmelCase__ ) ) # Make sure we don't have nans lowercase_ : Any = after_outputs[0].cpu().numpy() lowercase_ : List[str] = 0 lowercase_ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCAmelCase__ ,1e-5 ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' pass @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : str = ViTMAEModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def lowercase__( ): lowercase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' np.random.seed(2 ) lowercase_ : List[str] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(UpperCAmelCase__ ) lowercase_ : Dict = self.default_image_processor lowercase_ : List[str] = prepare_img() lowercase_ : Optional[Any] = image_processor(images=UpperCAmelCase__ ,return_tensors='pt' ).to(UpperCAmelCase__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase_ : str = ViTMAEConfig() lowercase_ : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase_ : Any = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**UpperCAmelCase__ ,noise=torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ ) ) # verify the logits lowercase_ : Optional[int] = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase__ ) lowercase_ : Dict = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(UpperCAmelCase__ ) ,atol=1e-4 ) )
code_codestyle: 361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
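Because remove() always pops the current minimum and re-heapifies, draining the heap visits nodes in ascending val order, which gives a simple heap-sort over Node objects. The helper below is a sketch that uses only the Node/MinHeap API defined above; drain_sorted is an illustrative name, not part of the original module.

def drain_sorted(heap):
    # Illustrative helper (not in the original file): repeatedly pop the
    # minimum until the heap is empty, collecting nodes in ascending order.
    out = []
    while not heap.is_empty():
        out.append(heap.remove())
    return out


# e.g. drain_sorted(MinHeap([Node("A", 3), Node("B", 1)]))
# prints/returns the nodes ordered Node(B, 1), Node(A, 3)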
321
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE ={ 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=10 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=None ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = size if size is not None else {"shortest_edge": 18} lowercase_ : int = crop_size if crop_size is not None else {"height": 18, "width": 18} lowercase_ : Tuple = parent lowercase_ : List[Any] = batch_size lowercase_ : List[str] = num_channels lowercase_ : int = num_frames lowercase_ : Union[str, Any] = image_size lowercase_ : Tuple = min_resolution lowercase_ : Tuple = max_resolution lowercase_ : str = do_resize lowercase_ : Optional[int] = size lowercase_ : Optional[int] = do_normalize lowercase_ : Dict = image_mean lowercase_ : List[Any] = image_std lowercase_ : List[Any] = crop_size def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = VivitImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = VivitImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'do_center_crop' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'size' ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} ) lowercase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowercase_ : Dict = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ) for video in video_inputs: 
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(video[0] ,Image.Image ) # Test not batched input lowercase_ : Any = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : str = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE ) for video in video_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(video[0] ,np.ndarray ) # Test not batched input lowercase_ : Any = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[str] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE ) for video in video_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertIsInstance(video[0] ,torch.Tensor ) # Test not batched input lowercase_ : List[str] = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Any = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __lowerCamelCase ( __snake_case ): lowercase = "roformer" def __init__( self ,__UpperCamelCase=5_0000 ,__UpperCamelCase=None ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=1536 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ ) lowercase_ : Tuple = vocab_size lowercase_ : Optional[int] = hidden_size if embedding_size is None else embedding_size lowercase_ : Optional[Any] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Any = num_attention_heads lowercase_ : Optional[Any] = hidden_act lowercase_ : Any = intermediate_size lowercase_ : Union[str, Any] = hidden_dropout_prob lowercase_ : Optional[int] = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[Any] = type_vocab_size lowercase_ : List[str] = initializer_range lowercase_ : Optional[int] = layer_norm_eps lowercase_ : Optional[Any] = rotary_value lowercase_ : str = use_cache class __lowerCamelCase ( __snake_case ): @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": lowercase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase_ : Optional[Any] = {0: """batch""", 1: """sequence"""} lowercase_ : Any = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE =torch.device("cpu") def lowercase__( ): lowercase_ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE_ ) lowercase_ : int = val def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : str = [] for k in state_dict.keys(): lowercase_ : List[Any] = k if ".pwconv" in k: lowercase_ : Optional[Any] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: lowercase_ : str = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: lowercase_ : int = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: lowercase_ : Optional[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: lowercase_ : List[Any] = k_new.split('.' ) if ls[2].isdigit(): lowercase_ : Optional[Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: lowercase_ : Any = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowercase_ : Tuple = 10_00 lowercase_ : Optional[int] = 'huggingface/label-files' lowercase_ : Optional[Any] = 'imagenet-1k-id2label.json' lowercase_ : List[str] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) ) lowercase_ : Optional[Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase_ : int = idalabel lowercase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowercase_ : str = [3, 3, 6, 4] lowercase_ : List[Any] = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": lowercase_ : int = [3, 3, 9, 6] lowercase_ : Any = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": lowercase_ : Optional[int] = [4, 3, 10, 5] lowercase_ : Dict = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": lowercase_ : Union[str, Any] = [4, 4, 12, 6] lowercase_ : int = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): lowercase_ : Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' , check_hash=SCREAMING_SNAKE_CASE_ ) else: lowercase_ : Any = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' ) lowercase_ : int = checkpoint lowercase_ : str = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model lowercase_ : List[str] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # prepare test inputs lowercase_ : Tuple = prepare_img() lowercase_ : int = ViTImageProcessor.from_pretrained('preprocessor_config' ) lowercase_ : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) # compare outputs from both models lowercase_ : List[str] = get_expected_output(SCREAMING_SNAKE_CASE_ ) lowercase_ : Optional[int] = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, 
args.pytorch_dump_folder_path, args.original_ckpt)
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
321
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCamelCase_ ): lowercase = ["""transformers""", """torch""", """note_seq"""] def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str: '''simple docstring''' requires_backends(self ,['transformers', 'torch', 'note_seq'] ) @classmethod def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> int: '''simple docstring''' requires_backends(cls ,['transformers', 'torch', 'note_seq'] ) @classmethod def _UpperCAmelCase ( cls ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' requires_backends(cls ,['transformers', 'torch', 'note_seq'] )
367
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
0
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' if k in (0.04, 0.06): lowercase_ : Optional[int] = k lowercase_ : Union[str, Any] = window_size else: raise ValueError('invalid k value' ) def __str__( self ) -> Dict: '''simple docstring''' return str(self.k ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : List[Any] = cva.imread(__lowerCamelCase ,0 ) lowercase_ : Union[str, Any] = img.shape lowercase_ : list[list[int]] = [] lowercase_ : Optional[Any] = img.copy() lowercase_ : Optional[Any] = cva.cvtColor(__lowerCamelCase ,cva.COLOR_GRAY2RGB ) lowercase_ : List[Any] = np.gradient(__lowerCamelCase ) lowercase_ : Union[str, Any] = dx**2 lowercase_ : str = dy**2 lowercase_ : List[Any] = dx * dy lowercase_ : str = 0.04 lowercase_ : Union[str, Any] = self.window_size // 2 for y in range(__lowerCamelCase ,h - offset ): for x in range(__lowerCamelCase ,w - offset ): lowercase_ : Optional[int] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ : Any = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ : Optional[int] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase_ : Tuple = (wxx * wyy) - (wxy**2) lowercase_ : Optional[int] = wxx + wyy lowercase_ : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,255 ) return color_img, corner_list if __name__ == "__main__": __SCREAMING_SNAKE_CASE =HarrisCorner(0.04, 3) __SCREAMING_SNAKE_CASE =edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
368
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
0
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = parent lowercase_ : Dict = 13 lowercase_ : Tuple = 7 lowercase_ : List[Any] = 30 lowercase_ : int = self.seq_length + self.mem_len lowercase_ : Tuple = 15 lowercase_ : int = True lowercase_ : List[str] = True lowercase_ : List[Any] = 99 lowercase_ : Optional[int] = [10, 50, 80] lowercase_ : int = 32 lowercase_ : Optional[Any] = 32 lowercase_ : Optional[Any] = 4 lowercase_ : Any = 8 lowercase_ : Union[str, Any] = 128 lowercase_ : List[str] = 2 lowercase_ : Any = 2 lowercase_ : int = None lowercase_ : Optional[Any] = 1 lowercase_ : str = 0 lowercase_ : Optional[Any] = 3 lowercase_ : Tuple = self.vocab_size - 1 lowercase_ : Dict = 0.01 def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Optional[Any] = None if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : int = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Union[str, Any] = TFTransfoXLModel(lowercase_ ) lowercase_ , lowercase_ : Any = model(lowercase_ ).to_tuple() lowercase_ : int = {'input_ids': input_ids_a, 'mems': mems_a} lowercase_ , lowercase_ : Dict = model(lowercase_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = TFTransfoXLLMHeadModel(lowercase_ ) lowercase_ , lowercase_ : Union[str, Any] = model(lowercase_ ).to_tuple() lowercase_ : List[Any] = {'input_ids': input_ids_a, 
'labels': lm_labels} lowercase_ , lowercase_ : List[str] = model(lowercase_ ).to_tuple() lowercase_ , lowercase_ : str = model([input_ids_a, mems_a] ).to_tuple() lowercase_ : List[str] = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels} lowercase_ , lowercase_ : str = model(lowercase_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : List[str] = TFTransfoXLForSequenceClassification(lowercase_ ) lowercase_ : Any = model(lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Tuple = self.prepare_config_and_inputs() ((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) : int = config_and_inputs lowercase_ : Optional[int] = {'input_ids': input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowercase = () if is_tf_available() else () lowercase = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Dict = TFTransfoXLModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=lowercase_ ,d_embed=37 ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' self.model_tester.set_seed() lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowercase_ ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' self.model_tester.set_seed() lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_ ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_ ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Dict = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase_ : int = model_class(lowercase_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase_ : List[str] = model.get_output_embeddings() assert isinstance(lowercase_ ,tf.keras.layers.Layer ) lowercase_ : List[str] = model.get_bias() assert name is None else: lowercase_ : Dict = model.get_output_embeddings() assert x is None lowercase_ : Optional[int] = model.get_bias() assert name is None def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' pass @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Any = TFTransfoXLModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @require_tf class UpperCamelCase ( unittest.TestCase ): @unittest.skip('Skip test until #12651 is resolved.' ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' ) # fmt: off lowercase_ : Any = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase_ : Dict = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase_ : int = model.generate(lowercase_ ,max_length=200 ,do_sample=lowercase_ ) self.assertListEqual(output_ids[0].numpy().tolist() ,lowercase_ )
369
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
0
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('Destination width/height should be > 0' ) lowercase_ : Union[str, Any] = img lowercase_ : Optional[int] = img.shape[1] lowercase_ : str = img.shape[0] lowercase_ : int = dst_width lowercase_ : str = dst_height lowercase_ : Tuple = self.src_w / self.dst_w lowercase_ : Tuple = self.src_h / self.dst_h lowercase_ : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) ,np.uinta ) * 255 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase_ : str = self.img[self.get_y(__UpperCamelCase )][self.get_x(__UpperCamelCase )] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return int(self.ratio_x * x ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE =800, 600 __SCREAMING_SNAKE_CASE =imread("image_data/lena.jpg", 1) __SCREAMING_SNAKE_CASE =NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
370
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
0
"""simple docstring""" from math import ceil def lowercase__( __SCREAMING_SNAKE_CASE : Dict = 10_01 ): lowercase_ : Optional[int] = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowercase_ : List[str] = 2 * i + 1 lowercase_ : Optional[Any] = 2 * i lowercase_ : Tuple = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: __SCREAMING_SNAKE_CASE =int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number")
371
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
"""simple docstring""" from timeit import timeit __SCREAMING_SNAKE_CASE ={ "MALAYALAM": True, "String": False, "rotor": True, "level": True, "A": True, "BB": True, "ABC": False, "amanaplanacanalpanama": True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Any = 0 lowercase_ : str = len(lowerCamelCase__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = len(lowerCamelCase__ ) // 2 lowercase_ : str = len(lowerCamelCase__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(lowerCamelCase__ ) ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): if len(lowerCamelCase__ ) <= 2: return True if s[0] == s[len(lowerCamelCase__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def lowercase__( __SCREAMING_SNAKE_CASE : str ): return s == s[::-1] def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Optional[Any] = F'''all({name}(key) is value for key, value in test_data.items())''' lowercase_ : int = F'''from __main__ import test_data, {name}''' lowercase_ : List[str] = 50_00_00 lowercase_ : List[str] = timeit(stmt=lowerCamelCase__ , setup=lowerCamelCase__ , number=lowerCamelCase__ ) print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print("a man a plan a canal panama") # finished 500,000 runs in 0.46793 seconds benchmark_function("is_palindrome_slice") # finished 500,000 runs in 0.85234 seconds benchmark_function("is_palindrome") # finished 500,000 runs in 1.32028 seconds benchmark_function("is_palindrome_recursive") # finished 500,000 runs in 2.08679 seconds benchmark_function("is_palindrome_traversal")
350
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
321
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __SCREAMING_SNAKE_CASE ={ "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
351
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
0
"""simple docstring""" class UpperCamelCase : def __init__( self ) -> None: '''simple docstring''' lowercase_ : dict[str, TrieNode] = {} # Mapping from char to TrieNode lowercase_ : List[str] = False def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' for word in words: self.insert(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : List[str] = self for char in word: if char not in curr.nodes: lowercase_ : List[str] = TrieNode() lowercase_ : Dict = curr.nodes[char] lowercase_ : Any = True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : List[Any] = self for char in word: if char not in curr.nodes: return False lowercase_ : Union[str, Any] = curr.nodes[char] return curr.is_leaf def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' def _delete(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool: if index == len(__UpperCamelCase ): # If word does not exist if not curr.is_leaf: return False lowercase_ : List[str] = False return len(curr.nodes ) == 0 lowercase_ : Any = word[index] lowercase_ : Any = curr.nodes.get(__UpperCamelCase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted lowercase_ : Dict = _delete(__UpperCamelCase ,__UpperCamelCase ,index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self ,__UpperCamelCase ,0 ) def lowercase__( __SCREAMING_SNAKE_CASE : TrieNode , __SCREAMING_SNAKE_CASE : str ): """simple docstring""" if node.is_leaf: print(UpperCamelCase__ , end=' ' ) for key, value in node.nodes.items(): print_words(UpperCamelCase__ , word + key ) def lowercase__( ): """simple docstring""" lowercase_ : Tuple = '''banana bananas bandana band apple all beast'''.split() lowercase_ : str = TrieNode() root.insert_many(UpperCamelCase__ ) # print_words(root, "") assert all(root.find(UpperCamelCase__ ) for word in words ) assert root.find('banana' ) assert not root.find('bandanas' ) assert not root.find('apps' ) assert root.find('apple' ) assert root.find('all' ) root.delete('all' ) assert not root.find('all' ) root.delete('banana' ) assert not root.find('banana' ) assert root.find('bananas' ) return True def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool ): """simple docstring""" print(str(UpperCamelCase__ ) , 'works!' if passes else 'doesn\'t work :(' ) def lowercase__( ): """simple docstring""" assert test_trie() def lowercase__( ): """simple docstring""" print_results('Testing trie functionality' , test_trie() ) if __name__ == "__main__": main()
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
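# Usage sketch for the feature extractor above: pad a small mono batch at the
# default 24 kHz rate (shapes shown are what the code produces when no
# chunking is configured):
# import numpy as np
# fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
# batch = fe(
#     [np.zeros(24_000, dtype=np.float32), np.zeros(12_000, dtype=np.float32)],
#     sampling_rate=24_000,
#     return_tensors="np",
# )
# print(batch["input_values"].shape)  # (2, 1, 24000): batch, channels, samples
# print(batch["padding_mask"].shape)  # (2, 24000): 0 where the short clip was padded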
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __SCREAMING_SNAKE_CASE ='pt' elif is_tf_available(): __SCREAMING_SNAKE_CASE ='tf' else: __SCREAMING_SNAKE_CASE ='jax' class UpperCamelCase ( _lowerCamelCase , unittest.TestCase ): """simple docstring""" lowercase = PerceiverTokenizer lowercase = False def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() lowercase_ : Union[str, Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=20 ,__UpperCamelCase=5 ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): try: lowercase_ : Optional[int] = tokenizer.decode([i] ,clean_up_tokenization_spaces=__UpperCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase_ : int = list(filter(lambda __UpperCamelCase : re.match(r'^[ a-zA-Z]+$' ,t[1] ) ,__UpperCamelCase ) ) lowercase_ : int = list(filter(lambda __UpperCamelCase : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=__UpperCamelCase ) ,__UpperCamelCase ) ) if max_length is not None and len(__UpperCamelCase ) > max_length: lowercase_ : List[Any] = toks[:max_length] if min_length is not None and len(__UpperCamelCase ) < min_length and len(__UpperCamelCase ) > 0: while len(__UpperCamelCase ) < min_length: lowercase_ : Tuple = toks + toks # toks_str = [t[1] for t in toks] lowercase_ : Optional[int] = [t[0] for t in toks] # Ensure consistency lowercase_ : Optional[Any] = tokenizer.decode(__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ) if " " not in output_txt and len(__UpperCamelCase ) > 1: lowercase_ : str = ( tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=__UpperCamelCase ) + ' ' + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=__UpperCamelCase ) ) if with_prefix_space: lowercase_ : str = ' ' + output_txt lowercase_ : List[str] = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) return output_txt, output_ids def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.perceiver_tokenizer lowercase_ : int = 'Unicode €.' 
lowercase_ : List[str] = tokenizer(__UpperCamelCase ) lowercase_ : Union[str, Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] ,__UpperCamelCase ) # decoding lowercase_ : Optional[Any] = tokenizer.decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,'[CLS]Unicode €.[SEP]' ) lowercase_ : Dict = tokenizer('e è é ê ë' ) lowercase_ : str = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] ,__UpperCamelCase ) # decoding lowercase_ : Optional[int] = tokenizer.decode(__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,'[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) ,'[CLS]e è é ê ë[SEP]' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Any = self.perceiver_tokenizer lowercase_ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off lowercase_ : List[str] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on lowercase_ : Optional[Any] = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors=__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) if FRAMEWORK != "jax": lowercase_ : int = list(batch.input_ids.numpy()[0] ) else: lowercase_ : Any = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertEqual((2, 38) ,batch.input_ids.shape ) self.assertEqual((2, 38) ,batch.attention_mask.shape ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.perceiver_tokenizer lowercase_ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowercase_ : str = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors=__UpperCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' ,__UpperCamelCase ) self.assertIn('attention_mask' ,__UpperCamelCase ) self.assertNotIn('decoder_input_ids' ,__UpperCamelCase ) self.assertNotIn('decoder_attention_mask' ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : str = self.perceiver_tokenizer lowercase_ : Union[str, Any] = [ 'Summary of the text.', 'Another summary.', ] lowercase_ : List[str] = tokenizer( text_target=__UpperCamelCase ,max_length=32 ,padding='max_length' ,truncation=__UpperCamelCase ,return_tensors=__UpperCamelCase ) self.assertEqual(32 ,targets['input_ids'].shape[1] ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length ,42 ) # Now let's start the test lowercase_ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase_ : Union[str, Any] = tempfile.mkdtemp() lowercase_ : Optional[int] = ' He is very happy, UNwant\u00E9d,running' lowercase_ : Dict = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) tokenizer.save_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = 
tokenizer.__class__.from_pretrained(__UpperCamelCase ) lowercase_ : List[str] = after_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) shutil.rmtree(__UpperCamelCase ) lowercase_ : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase_ : int = tempfile.mkdtemp() lowercase_ : List[str] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) lowercase_ : List[Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) lowercase_ : Union[str, Any] = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) tokenizer.save_pretrained(__UpperCamelCase ) lowercase_ : Any = tokenizer.__class__.from_pretrained(__UpperCamelCase ) lowercase_ : int = after_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertIn('new_additional_special_token' ,after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length ,42 ) lowercase_ : List[Any] = tokenizer.__class__.from_pretrained(__UpperCamelCase ,model_max_length=43 ) self.assertEqual(tokenizer.model_max_length ,43 ) shutil.rmtree(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Any = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file: lowercase_ : Any = json.load(__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file: lowercase_ : int = json.load(__UpperCamelCase ) lowercase_ : Tuple = [f'''<extra_id_{i}>''' for i in range(125 )] lowercase_ : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] lowercase_ : List[Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(__UpperCamelCase ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile: json.dump(__UpperCamelCase ,__UpperCamelCase ) with open(os.path.join(__UpperCamelCase ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile: json.dump(__UpperCamelCase ,__UpperCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase_ : Optional[int] = tokenizer_class.from_pretrained( __UpperCamelCase ,) self.assertIn( 'an_additional_special_token' ,tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] ,tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) ,) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase_ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' ,lstrip=__UpperCamelCase )] lowercase_ : List[str] = tokenizer_class.from_pretrained( __UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,) self.assertIn('a_new_additional_special_token' ,tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] ,tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Dict = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) ,'�' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = self.get_tokenizers(fast=__UpperCamelCase ,do_lower_case=__UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): lowercase_ : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_string(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
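# The tests above lean on Perceiver's byte-level design: text is encoded as raw
# UTF-8 bytes shifted by the special-token offset, so there is no vocabulary
# file to load. A minimal round-trip sketch (network access required):
# from transformers import PerceiverTokenizer
# tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
# ids = tok("Unicode €.").input_ids   # [CLS] + one id per UTF-8 byte + [SEP]
# print(tok.decode(ids))              # "[CLS]Unicode €.[SEP]"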
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool __SCREAMING_SNAKE_CASE ={ '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri Arabic''': 
'''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', '''Northern 
Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class UpperCamelCase ( lowerCamelCase__ ): lowercase = 'facebook/nllb-200-distilled-600M' lowercase = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) lowercase = 'translator' lowercase = AutoTokenizer lowercase = AutoModelForSeqaSeqLM lowercase = LANGUAGE_CODES lowercase = ['text', 'text', 'text'] lowercase = ['text'] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''' ) lowercase_ : List[Any] = self.lang_to_code[src_lang] lowercase_ : Tuple = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __UpperCamelCase ,return_tensors='pt' ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' return self.model.generate(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=__UpperCamelCase )
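# Usage sketch for the tool above (here assumed to be named TranslationTool, as
# in transformers' agents module): language names in plain English are mapped
# to NLLB codes by the encode step before generation.
# tool = TranslationTool()
# print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))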
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = XLMProphetNetTokenizer lowercase = False lowercase = True def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ : Tuple = XLMProphetNetTokenizer(lowerCamelCase_ ,keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = '[PAD]' lowercase_ : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) ,lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) ,lowerCamelCase_ ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'[PAD]' ) self.assertEqual(vocab_keys[1] ,'[CLS]' ) self.assertEqual(vocab_keys[-1] ,'j' ) self.assertEqual(len(lowerCamelCase_ ) ,1012 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,1012 ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Any = XLMProphetNetTokenizer(lowerCamelCase_ ,keep_accents=lowerCamelCase_ ) lowercase_ : Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCamelCase_ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) lowercase_ : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) lowercase_ : int = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] ,) lowercase_ : str = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] ,) @cached_property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Optional[Any] = 'Hello World!' 
lowercase_ : int = [3_5389, 6672, 49, 2] self.assertListEqual(lowerCamelCase_ ,self.big_tokenizer.encode(lowerCamelCase_ ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
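# Invocation sketch: the checkpoint_url default above points at the DiT-base
# checkpoint, so a direct call is equivalent to running the CLI with defaults
# (the output folder is illustrative):
# convert_dit_checkpoint(
#     "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#     "./dit-base",
#     False,  # push_to_hub
# )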
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = parent lowercase_ : List[str] = batch_size lowercase_ : Dict = seq_length lowercase_ : int = is_training lowercase_ : Union[str, Any] = use_input_mask lowercase_ : Tuple = use_token_type_ids lowercase_ : int = use_labels lowercase_ : Optional[int] = vocab_size lowercase_ : str = hidden_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : List[str] = type_vocab_size lowercase_ : int = type_sequence_label_size lowercase_ : List[Any] = initializer_range lowercase_ : Any = num_labels lowercase_ : Union[str, Any] = num_choices lowercase_ : Tuple = scope def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Dict = None if self.use_token_type_ids: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase_ : str = None lowercase_ : Union[str, Any] = None lowercase_ : List[Any] = None if self.use_labels: lowercase_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob 
,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[Any] = NystromformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[str] = model(lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ) lowercase_ : str = model(lowercase_ ,token_type_ids=lowercase_ ) lowercase_ : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = NystromformerForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : List[Any] = model(lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = NystromformerForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[int] = model( lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,start_positions=lowercase_ ,end_positions=lowercase_ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[Any] = self.num_labels lowercase_ : Optional[int] = NystromformerForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[int] = model(lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = self.num_labels lowercase_ : Tuple = NystromformerForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[int] = model(lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.num_choices lowercase_ : Dict = NystromformerForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : Union[str, Any] = token_type_ids.unsqueeze(1 
).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : Optional[int] = model( lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = config_and_inputs lowercase_ : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowercase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': NystromformerModel, 'fill-mask': NystromformerForMaskedLM, 'question-answering': NystromformerForQuestionAnswering, 'text-classification': NystromformerForSequenceClassification, 'token-classification': NystromformerForTokenClassification, 'zero-shot': NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[str] = NystromformerModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=lowercase_ ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Tuple = type self.model_tester.create_and_check_model(*lowercase_ ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase_ ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = NystromformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) 
@require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : int = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' ) lowercase_ : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowercase_ : int = model(lowercase_ )[0] lowercase_ : str = torch.Size((1, 6, 768) ) self.assertEqual(output.shape ,lowercase_ ) lowercase_ : int = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,lowercase_ ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = 'the [MASK] of Belgium is Brussels' lowercase_ : Dict = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' ) lowercase_ : str = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' ) lowercase_ : Tuple = tokenizer(lowercase_ ,return_tensors='pt' ) with torch.no_grad(): lowercase_ : Tuple = model(encoding.input_ids ).logits lowercase_ : List[Any] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(lowercase_ ) ,'capital' )
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
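For the sample call above the answer is 9, and all three implementations should agree; a short sanity sketch using the function names from the snippet:

# ordered combinations of [1, 2, 5] summing to 5:
# 1+1+1+1+1, 1+1+1+2 in 4 orders, 1+2+2 in 3 orders, and 5 itself -> 9 total
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9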
"""simple docstring""" from math import factorial, radians def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str = 18 , __SCREAMING_SNAKE_CASE : Optional[Any] = 10 ): lowercase_ : int = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0) # Converting from degrees to radians lowercase_ : Any = radians(A_ ) lowercase_ : Dict = angle_in_radians lowercase_ : Any = 3 lowercase_ : List[Any] = -1 for _ in range(A_ ): result += (b * (angle_in_radians**a)) / factorial(A_ ) lowercase_ : Any = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(A_ , A_ ) if __name__ == "__main__": __import__("doctest").testmod()
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
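One way to see that the truncated Maclaurin series above is adequate is to compare it against math.sin; a sketch, assuming the fixed maclaurin_sin name from the snippet:

from math import radians, sin

# with 18 series terms the truncation error is far below the rounding width
for deg in (0.0, 30.0, 90.0, 180.0, 270.0):
    assert abs(maclaurin_sin(deg) - sin(radians(deg))) < 1e-9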
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple: lowercase_ : List[str] = git.Repo(search_parent_directories=_lowercase ) lowercase_ : List[Any] = { 'repo_id': str(_lowercase ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(_lowercase , 'git_log.json' ) , 'w' ) as f: json.dump(_lowercase , _lowercase , indent=4 ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if params.n_gpu <= 0: lowercase_ : List[str] = 0 lowercase_ : Any = -1 lowercase_ : str = True lowercase_ : Union[str, Any] = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase_ : str = int(os.environ['WORLD_SIZE'] ) lowercase_ : Any = int(os.environ['N_GPU_NODE'] ) lowercase_ : Any = int(os.environ['RANK'] ) # number of nodes / node ID lowercase_ : List[str] = params.world_size // params.n_gpu_per_node lowercase_ : List[str] = params.global_rank // params.n_gpu_per_node lowercase_ : Tuple = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase_ : Tuple = 1 lowercase_ : Union[str, Any] = 0 lowercase_ : Any = 0 lowercase_ : Dict = 0 lowercase_ : Union[str, Any] = 1 lowercase_ : Any = 1 lowercase_ : Union[str, Any] = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase_ : Any = params.node_id == 0 and params.local_rank == 0 lowercase_ : Union[str, Any] = params.n_nodes > 1 # summary lowercase_ : Any = F'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
0
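The rank bookkeeping in the GPU-initialisation code above reduces to a few integer divisions over environment variables; a minimal sketch, assuming torchrun-style variables are set (the N_GPU_NODE name mirrors the snippet and may differ in other launchers):

import os

world_size = int(os.environ["WORLD_SIZE"])        # total number of processes
global_rank = int(os.environ["RANK"])             # this process's global id
local_rank = int(os.environ.get("LOCAL_RANK", "0"))
n_gpu_per_node = int(os.environ.get("N_GPU_NODE", "1"))

n_nodes = world_size // n_gpu_per_node            # nodes in the job
node_id = global_rank // n_gpu_per_node           # which node this process is on
is_master = node_id == 0 and local_rank == 0      # exactly one master process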
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : list ): lowercase_ : Any = len(__SCREAMING_SNAKE_CASE ) for i in range(1 , __SCREAMING_SNAKE_CASE ): lowercase_ : Dict = collection[i] lowercase_ : Optional[int] = 0 lowercase_ : List[Any] = i - 1 while low <= high: lowercase_ : List[Any] = (low + high) // 2 if val < collection[mid]: lowercase_ : Union[str, Any] = mid - 1 else: lowercase_ : Union[str, Any] = mid + 1 for j in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , -1 ): lowercase_ : Any = collection[j - 1] lowercase_ : Any = val return collection if __name__ == "__main__": __SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip() __SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(",")] print(binary_insertion_sort(unsorted))
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
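The standard library already offers the binary-search-then-insert step used by the sort above; a comparison sketch built on bisect.insort (the helper name is illustrative):

from bisect import insort


def insertion_sort_bisect(items: list) -> list:
    result: list = []
    for item in items:
        insort(result, item)  # binary search for the slot, then insert
    return result


assert insertion_sort_bisect([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]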
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow __SCREAMING_SNAKE_CASE =False class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase=32 ) -> Union[str, Any]: '''simple docstring''' set_seed(0 ) lowercase_ : str = UNetaDModel(sample_size=lowerCamelCase_ ,in_channels=3 ,out_channels=3 ) lowercase_ : int = torch.optim.SGD(model.parameters() ,lr=0.0001 ) return model, optimizer @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : str = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowercase_ : str = DDPMScheduler( num_train_timesteps=1000 ,beta_start=0.0001 ,beta_end=0.02 ,beta_schedule='linear' ,clip_sample=lowerCamelCase_ ,) lowercase_ : List[str] = DDIMScheduler( num_train_timesteps=1000 ,beta_start=0.0001 ,beta_end=0.02 ,beta_schedule='linear' ,clip_sample=lowerCamelCase_ ,) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowercase_ : Any = [torch.randn((4, 3, 32, 32) ).clip(-1 ,1 ).to(lowerCamelCase_ ) for _ in range(4 )] lowercase_ : Dict = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase_ ) for _ in range(4 )] lowercase_ : Any = [torch.randint(0 ,1000 ,(4,) ).long().to(lowerCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowercase_ , lowercase_ : Any = self.get_model_optimizer(resolution=32 ) model.train().to(lowerCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowercase_ : Union[str, Any] = ddpm_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i] ) lowercase_ : Dict = model(lowerCamelCase_ ,timesteps[i] ).sample lowercase_ : Union[str, Any] = torch.nn.functional.mse_loss(lowerCamelCase_ ,noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowercase_ , lowercase_ : Optional[Any] = self.get_model_optimizer(resolution=32 ) model.train().to(lowerCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowercase_ : int = ddim_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i] ) lowercase_ : Any = model(lowerCamelCase_ ,timesteps[i] ).sample lowercase_ : int = torch.nn.functional.mse_loss(lowerCamelCase_ ,noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-5 ) ) self.assertTrue(torch.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-5 ) )
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
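Both training loops in the scheduler test above lean on scheduler.add_noise, which applies the closed-form forward diffusion process; since DDPM and DDIM share the same schedule, the two losses are expected to match. A sketch of the formula (alpha_bar_t stands for the cumulative product of the noise schedule; the function name is illustrative):

import torch


def add_noise(x0: torch.Tensor, eps: torch.Tensor, alpha_bar_t: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    return alpha_bar_t.sqrt() * x0 + (1.0 - alpha_bar_t).sqrt() * eps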
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __SCREAMING_SNAKE_CASE =[ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): for attribute in key.split('.' ): lowercase_ : Tuple = getattr(__lowerCAmelCase , __lowerCAmelCase ) if weight_type is not None: lowercase_ : Any = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape else: lowercase_ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ : List[str] = value elif weight_type == "weight_g": lowercase_ : Optional[int] = value elif weight_type == "weight_v": lowercase_ : List[Any] = value elif weight_type == "bias": lowercase_ : Optional[Any] = value else: lowercase_ : int = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[str] = [] lowercase_ : Any = fairseq_model.state_dict() lowercase_ : int = hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase_ : Optional[Any] = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase_ : List[str] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: lowercase_ : Union[str, Any] = True if "*" in mapped_key: lowercase_ : Dict = name.split(__lowerCAmelCase )[0].split('.' 
)[-2] lowercase_ : Optional[int] = mapped_key.replace('*' , __lowerCAmelCase ) if "weight_g" in name: lowercase_ : List[Any] = '''weight_g''' elif "weight_v" in name: lowercase_ : Tuple = '''weight_v''' elif "bias" in name and "relative_attention_bias" not in name: lowercase_ : Union[str, Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase_ : Any = '''weight''' else: lowercase_ : Union[str, Any] = None set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : str = full_name.split('conv_layers.' )[-1] lowercase_ : Optional[int] = name.split('.' ) lowercase_ : Optional[int] = int(items[0] ) lowercase_ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase_ : str = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase_ : List[Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowercase_ : Any = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase_ : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=None ): lowercase_ : Any = torch.load(__lowerCAmelCase ) lowercase_ : str = WavLMConfigOrig(checkpoint['cfg'] ) lowercase_ : List[str] = WavLMOrig(__lowerCAmelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: lowercase_ : Tuple = WavLMConfig.from_pretrained(__lowerCAmelCase ) else: lowercase_ : List[str] = WavLMConfig() lowercase_ : List[Any] = WavLMModel(__lowerCAmelCase ) recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase ) hf_wavlm.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
0
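The MinHeap above implements decrease-key through an index map kept in sync with the array; with the standard library's heapq, the usual alternative is lazy deletion. A minimal sketch under that approach (all names are illustrative):

import heapq

heap, entry_finder = [], {}


def push(name, val):
    entry = [val, name, True]          # the last field marks the entry as live
    entry_finder[name] = entry
    heapq.heappush(heap, entry)


def decrease_key(name, new_val):
    entry_finder[name][2] = False      # invalidate the stale entry in place
    push(name, new_val)


def pop():
    while heap:
        val, name, live = heapq.heappop(heap)
        if live:                       # skip entries invalidated by decrease_key
            del entry_finder[name]
            return name, val
    raise KeyError("pop from an empty heap")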
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) @add_end_docstrings( a__ , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , ) class UpperCamelCase ( a__ ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> np.ndarray: '''simple docstring''' if self.framework == "tf": lowercase_ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": lowercase_ : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=lowerCAmelCase__ ) else: raise ValueError('Unsupported framework' ) return masked_index def _UpperCAmelCase ( self ,__UpperCamelCase ) -> np.ndarray: '''simple docstring''' lowercase_ : str = self.get_masked_index(lowerCAmelCase__ ) lowercase_ : Any = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' ,self.model.base_model_prefix ,f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' ,) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(lowerCAmelCase__ ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Dict[str, GenericTensor]: '''simple docstring''' if return_tensors is None: lowercase_ : Optional[Any] = self.framework lowercase_ : Any = self.tokenizer(lowerCAmelCase__ ,return_tensors=lowerCAmelCase__ ) self.ensure_exactly_one_mask_token(lowerCAmelCase__ ) return model_inputs def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Tuple = self.model(**lowerCAmelCase__ ) lowercase_ : str = model_inputs["input_ids"] return model_outputs def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=5 ,__UpperCamelCase=None ) -> Union[str, Any]: '''simple docstring''' if target_ids is not None and target_ids.shape[0] < top_k: lowercase_ : Tuple = target_ids.shape[0] lowercase_ : int = model_outputs["input_ids"][0] lowercase_ : Union[str, Any] = model_outputs["logits"] if self.framework == "tf": lowercase_ : List[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] lowercase_ : Tuple = outputs.numpy() lowercase_ : Dict = outputs[0, masked_index, :] lowercase_ : Dict = stable_softmax(lowerCAmelCase__ ,axis=-1 ) if target_ids is not None: lowercase_ : Any = tf.gather_nd(tf.squeeze(lowerCAmelCase__ ,0 ) ,target_ids.reshape(-1 ,1 ) ) lowercase_ : Optional[int] = tf.expand_dims(lowerCAmelCase__ ,0 ) lowercase_ : Optional[int] = tf.math.top_k(lowerCAmelCase__ ,k=lowerCAmelCase__ ) lowercase_ : Optional[Any] = topk.values.numpy(), topk.indices.numpy() else: lowercase_ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id 
,as_tuple=lowerCAmelCase__ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample lowercase_ : List[str] = outputs[0, masked_index, :] lowercase_ : Optional[Any] = logits.softmax(dim=-1 ) if target_ids is not None: lowercase_ : List[str] = probs[..., target_ids] lowercase_ : List[Any] = probs.topk(lowerCAmelCase__ ) lowercase_ : Tuple = [] lowercase_ : Any = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ): lowercase_ : Union[str, Any] = [] for v, p in zip(_values ,_predictions ): # Copy is important since we're going to modify this array in place lowercase_ : Union[str, Any] = input_ids.numpy().copy() if target_ids is not None: lowercase_ : Union[str, Any] = target_ids[p].tolist() lowercase_ : Optional[Any] = p # Filter padding out: lowercase_ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back lowercase_ : Tuple = self.tokenizer.decode(lowerCAmelCase__ ,skip_special_tokens=lowerCAmelCase__ ) lowercase_ : List[Any] = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(lowerCAmelCase__ ) result.append(lowerCAmelCase__ ) if single_mask: return result[0] return result def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Dict: '''simple docstring''' if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase_ : List[Any] = [targets] try: lowercase_ : int = self.tokenizer.get_vocab() except Exception: lowercase_ : int = {} lowercase_ : Optional[int] = [] for target in targets: lowercase_ : Tuple = vocab.get(lowerCAmelCase__ ,lowerCAmelCase__ ) if id_ is None: lowercase_ : Optional[int] = self.tokenizer( lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ ,max_length=1 ,truncation=lowerCAmelCase__ ,)["input_ids"] if len(lowerCAmelCase__ ) == 0: logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' 'We cannot replace it with anything meaningful, ignoring it' ) continue lowercase_ : Union[str, Any] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) lowercase_ : List[Any] = list(set(lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) == 0: raise ValueError('At least one target must be provided when passed.' ) lowercase_ : Union[str, Any] = np.array(lowerCAmelCase__ ) return target_ids def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> int: '''simple docstring''' lowercase_ : List[Any] = {} if targets is not None: lowercase_ : Optional[int] = self.get_target_ids(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase_ : Any = target_ids if top_k is not None: lowercase_ : int = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' ,self.model.base_model_prefix ,'The tokenizer does not define a `mask_token`.' 
) return {}, {}, postprocess_params def __call__( self ,__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : List[str] = super().__call__(lowerCAmelCase__ ,**lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1: return outputs[0] return outputs
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
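End to end, the fill-mask pipeline class above is normally reached through transformers.pipeline; a short usage sketch (the model name is illustrative):

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
print(unmasker("The capital of France is [MASK].", top_k=3))
# each prediction is a dict with "score", "token", "token_str" and "sequence",
# matching the rows built in the postprocess step above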
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Tuple = os.path.join(args.tf_model_dir , 'parameters.json' ) lowercase_ : int = json.loads(open(__SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('.pt' ): lowercase_ : Dict = args.output + '.pt' lowercase_ : Any = OrderedDict() with tf.device('/CPU:0' ): lowercase_ : List[str] = tf.train.load_checkpoint(args.tf_model_dir ) lowercase_ : List[str] = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase_ : str = reader.get_tensor(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ): continue if key_name.startswith('pasts/' ): if key_name.startswith('pasts/mlp' ): lowercase_ : int = int(key_name[9] ) elif key_name.startswith('pasts/out' ): lowercase_ : List[Any] = 8 lowercase_ : Any = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase_ : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Tuple = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/moe' ): lowercase_ : List[Any] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/switch_gating/kernel' ): lowercase_ : Tuple = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player lowercase_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : str = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/softmlp/kernel' ): lowercase_ : Tuple = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player lowercase_ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : str = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ): lowercase_ : str = key_name[-9:-7] for i in range(16 ): lowercase_ : Optional[Any] = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer) lowercase_ : Any = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase_ : Optional[Any] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/mlp' ): lowercase_ : List[str] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/p1/kernel' ): lowercase_ : Tuple = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player lowercase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/p1/bias' ): lowercase_ : Dict = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player lowercase_ : Any = vnp.copy() # same because it is one dimensional lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/p2/kernel' ): lowercase_ : str = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player lowercase_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Union[str, Any] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/p2/bias' ): lowercase_ : List[str] = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player lowercase_ : int = vnp.copy() # same because it is one dimensional 
lowercase_ : int = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/ln' ): lowercase_ : Any = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): lowercase_ : Tuple = 'model.blocks.%d.feed_forward.norm.bias' % player lowercase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional lowercase_ : Union[str, Any] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/g' ): lowercase_ : Tuple = 'model.blocks.%d.feed_forward.norm.weight' % player lowercase_ : str = vnp.copy() # same because it is one dimensional lowercase_ : Any = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/att' ): lowercase_ : int = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/qkv/kernel' ): lowercase_ : Optional[Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase_ : Any = state[:, 0, :, :] lowercase_ : List[Any] = state[:, 1, :, :] lowercase_ : Optional[int] = state[:, 2, :, :] lowercase_ : Optional[Any] = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Dict = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[Any] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player lowercase_ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player lowercase_ : Any = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/o/kernel' ): lowercase_ : Dict = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player lowercase_ : Optional[int] = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/an' ): lowercase_ : Any = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): lowercase_ : Tuple = 'model.blocks.%d.self_attn.norm.bias' % player lowercase_ : int = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.endswith('/g' ): lowercase_ : Dict = 'model.blocks.%d.self_attn.norm.weight' % player lowercase_ : Optional[int] = vnp.copy() # same because it is one dimensional lowercase_ : Dict = torch.tensor(__SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('model/wte' ) or key_name.startswith('model/wpe' ) or key_name.startswith('model/ete' ) ): lowercase_ : int = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[ key_name[-3:] ] lowercase_ : str = 'model.%s.weight' % nlayer lowercase_ : int = vnp.copy() # same in embedded lowercase_ : Optional[Any] = torch.tensor(__SCREAMING_SNAKE_CASE ) if key_name.startswith('model/wte' ): lowercase_ : str = 'lm_head.weight' lowercase_ : List[str] = vnp.copy() # same in embedded lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name.startswith('model/wob' ): lowercase_ : Union[str, Any] = 'final_logits_bias' lowercase_ : Tuple = 
vnp.copy() # same in embedded lowercase_ : List[Any] = state.reshape((1, -1) ) lowercase_ : Optional[Any] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase_ : Optional[int] = 'model.last_project.weight' lowercase_ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase_ : int = 'model.last_project.bias' lowercase_ : List[Any] = vnp.copy() # same because it is one dimensional lowercase_ : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ) torch.save(__SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_tf_gptsan_to_pt(args)
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" from __future__ import annotations class __lowerCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Dict = data lowercase_ : Tuple = None lowercase_ : List[str] = None def lowercase__( __SCREAMING_SNAKE_CASE : Node | None ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None ): return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def lowercase__( __SCREAMING_SNAKE_CASE : Node ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase__( ): # Main function for testing. lowercase_ : List[str] = Node(1 ) lowercase_ : List[str] = Node(2 ) lowercase_ : Optional[Any] = Node(3 ) lowercase_ : Any = Node(4 ) lowercase_ : List[Any] = Node(5 ) lowercase_ : Optional[Any] = Node(6 ) lowercase_ : str = Node(7 ) lowercase_ : Tuple = Node(8 ) lowercase_ : Optional[int] = Node(9 ) print(is_full_binary_tree(snake_case__ ) ) print(depth_of_tree(snake_case__ ) ) print('Tree is: ' ) display(snake_case__ ) if __name__ == "__main__": main()
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
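# Hedged sketch of the caching contract the decoder test above exercises: a
# forward pass with use_cache=True returns past_key_values, which can stand in
# for the already-processed prefix on the next step. The tiny random config is
# illustrative only, not a released checkpoint.
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()

prefix = torch.tensor([[5, 6, 7]])
with torch.no_grad():
    out = model(prefix, use_cache=True)
    # feed only the new token, together with the cached keys/values
    step = model(torch.tensor([[8]]), past_key_values=out.past_key_values)
    # equivalent (up to numerics) to running the full sequence at once
    full = model(torch.tensor([[5, 6, 7, 8]]))
assert torch.allclose(step.logits[:, -1], full.logits[:, -1], atol=1e-4)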
321
0
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __SCREAMING_SNAKE_CASE ='\\n\n' __SCREAMING_SNAKE_CASE ='\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' __SCREAMING_SNAKE_CASE ='\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 16 ,__UpperCamelCase = True ,__UpperCamelCase=None ) -> Tuple: '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": lowercase_ : Optional[Any] = """cuda""" else: lowercase_ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu""" lowercase_ : int = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowercase_ : Optional[Any] = model.to(SCREAMING_SNAKE_CASE_ ) lowercase_ : List[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: lowercase_ : int = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(SCREAMING_SNAKE_CASE_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" lowercase_ : Any = model.config.max_length - 1 else: lowercase_ : int = model.config.max_length lowercase_ : List[Any] = tokenizer( SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ,padding=SCREAMING_SNAKE_CASE_ ,truncation=SCREAMING_SNAKE_CASE_ ,max_length=SCREAMING_SNAKE_CASE_ ,return_tensors='pt' ,return_attention_mask=SCREAMING_SNAKE_CASE_ ,).to(SCREAMING_SNAKE_CASE_ ) lowercase_ : int = encodings["""input_ids"""] lowercase_ : Optional[int] = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
lowercase_ : Union[str, Any] = [] lowercase_ : int = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 ,len(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) ): lowercase_ : str = min(start_index + batch_size ,len(SCREAMING_SNAKE_CASE_ ) ) lowercase_ : Any = encoded_texts[start_index:end_index] lowercase_ : str = attn_masks[start_index:end_index] if add_start_token: lowercase_ : Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ : Dict = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 ) lowercase_ : Optional[int] = torch.cat( [torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(SCREAMING_SNAKE_CASE_ ), attn_mask] ,dim=1 ) lowercase_ : Dict = encoded_batch with torch.no_grad(): lowercase_ : Tuple = model(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ).logits lowercase_ : Optional[int] = out_logits[..., :-1, :].contiguous() lowercase_ : Any = labels[..., 1:].contiguous() lowercase_ : List[str] = attn_mask[..., 1:].contiguous() lowercase_ : List[Any] = torch.expa( (loss_fct(shift_logits.transpose(1 ,2 ) ,SCREAMING_SNAKE_CASE_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(SCREAMING_SNAKE_CASE_ )}
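# Hedged mini-example of the quantity the metric above computes: perplexity is
# exp(mean per-token negative log-likelihood), with padding masked out of the
# mean. The logits/labels below are toy values, not model outputs.
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 4, 10)          # (batch, seq, vocab)
labels = torch.tensor([[1, 2, 3, 0]])   # last position is padding
mask = torch.tensor([[1, 1, 1, 0]]).float()

loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(logits.transpose(1, 2), labels)      # (batch, seq)
ppl = torch.exp((nll * mask).sum(1) / mask.sum(1))  # masked mean, then exp
print(ppl)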
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
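# Hedged usage sketch of the helpers exercised by the tests above; the model
# name, opset, and output path are illustrative. convert() traces the model and
# writes an ONNX graph (the output folder should not already contain files),
# and quantize() writes a quantized copy next to it and returns its path.
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
quantized_path = quantize(onnx_path)
print(quantized_path)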
321
0
"""simple docstring""" from __future__ import annotations import math def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(SCREAMING_SNAKE_CASE__ ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) def lowercase__( ) -> None: lowercase_ : List[str] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase_ : Optional[Any] = math.log(len(SCREAMING_SNAKE_CASE__ ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
367
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
0
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE =get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class UpperCamelCase ( _a , unittest.TestCase ): lowercase = PegasusTokenizer lowercase = PegasusTokenizerFast lowercase = True lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ : Tuple = PegasusTokenizer(__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/pegasus-large' ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return ("This is a test", "This is a test") def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = '</s>' lowercase_ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<pad>' ) self.assertEqual(vocab_keys[1] ,'</s>' ) self.assertEqual(vocab_keys[-1] ,'v' ) self.assertEqual(len(__UpperCamelCase ) ,1103 ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,1103 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowercase_ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowercase_ : Any = ( 'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important' ' </s> <pad> <pad> <pad>' ) lowercase_ : List[str] = rust_tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ).input_ids[0] lowercase_ : Dict = py_tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : int = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowercase_ : int = '<mask_1> To ensure a <mask_2> flow of bank resolutions.' 
lowercase_ : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowercase_ : str = tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowercase_ : str = 'To ensure a smooth flow of bank resolutions.' lowercase_ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowercase_ : Dict = tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = ['This is going to be way too long.' * 150, 'short example'] lowercase_ : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny'] lowercase_ : Optional[int] = self._large_tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors='pt' ) lowercase_ : int = self._large_tokenizer( text_target=__UpperCamelCase ,max_length=5 ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask. 
@slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : str = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,) @require_sentencepiece @require_tokenizers class UpperCamelCase ( _a , unittest.TestCase ): lowercase = PegasusTokenizer lowercase = PegasusTokenizerFast lowercase = True lowercase = True def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase_ : List[str] = PegasusTokenizer(__UpperCamelCase ,offset=0 ,mask_token_sent=__UpperCamelCase ,mask_token='[MASK]' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> PegasusTokenizer: '''simple docstring''' return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return ("This is a test", "This is a test") def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowercase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowercase_ : Any = ( 'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>' ' <pad> <pad> <pad>' ) lowercase_ : Any = 
rust_tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ).input_ids[0] lowercase_ : Union[str, Any] = py_tokenizer([raw_input_str] ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ).input_ids[0] self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = ['This is going to be way too long.' * 1000, 'short example'] lowercase_ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny'] lowercase_ : int = self._large_tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors='pt' ) lowercase_ : Optional[Any] = self._large_tokenizer( text_target=__UpperCamelCase ,max_length=5 ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors='pt' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCamelCase ) == 2 # input_ids, attention_mask. def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = ( 'This is an example string that is used to test the original TF implementation against the HF' ' implementation' ) lowercase_ : List[Any] = self._large_tokenizer(__UpperCamelCase ).input_ids self.assertListEqual( __UpperCamelCase ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
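# Hedged restatement of the id arithmetic the assertions above pin down: Pegasus
# reserves ids [0, offset) for special and mask tokens and shifts every
# SentencePiece id up by `offset`, so converting back is sp_id = pegasus_id - offset.
# The numbers restate the test's expectations for google/pegasus-large rather
# than querying a checkpoint.
offset = 103
pad_token_id, eos_token_id = 0, 1  # <pad>, </s> sit below the offset
unk_token_id = offset + 2          # == 105, per the assertion above
assert unk_token_id == 105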
368
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
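# Hedged illustration of the transpose rule the get_*_array helpers above apply
# to any name containing "kernel": TF dense kernels are stored as
# (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features). Toy shapes only.
import numpy as np
import torch

tf_kernel = np.random.randn(768, 3072).astype(np.float32)  # (in, out) in TF
pt_weight = torch.from_numpy(tf_kernel.transpose())         # (out, in) for PyTorch
assert pt_weight.shape == (3072, 768)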
321
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class UpperCamelCase ( unittest.TestCase , __a ): def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = load_tool('text-classification' ) self.tool.setup() lowercase_ : Dict = load_tool('text-classification' ,remote=UpperCamelCase__ ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.tool('That\'s quite cool' ,['positive', 'negative'] ) self.assertEqual(UpperCamelCase__ ,'positive' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Tuple = self.remote_tool('That\'s quite cool' ,['positive', 'negative'] ) self.assertEqual(UpperCamelCase__ ,'positive' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = self.tool(text='That\'s quite cool' ,labels=['positive', 'negative'] ) self.assertEqual(UpperCamelCase__ ,'positive' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = self.remote_tool(text='That\'s quite cool' ,labels=['positive', 'negative'] ) self.assertEqual(UpperCamelCase__ ,'positive' )
369
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=_lowerCAmelCase ): lowercase = ["keras_nlp"] def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self ,['keras_nlp'] )
370
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : dict ): lowercase_ : int = set() # edges = list of graph's edges lowercase_ : List[Any] = get_edges(a__ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: lowercase_ , lowercase_ : Tuple = edges.pop() chosen_vertices.add(a__ ) chosen_vertices.add(a__ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(a__ ) return chosen_vertices def lowercase__( __SCREAMING_SNAKE_CASE : dict ): lowercase_ : Tuple = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
371
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
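# A minimal sketch of the fairseq-style position numbering the ESM embedding test above
# checks: real tokens are numbered upward from padding_idx + 1 and pad positions keep
# padding_idx. The helper name here is illustrative, not the library's.
import torch

def fairseq_style_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()                   # 1 for real tokens, 0 for padding
    incremental_indices = torch.cumsum(mask, dim=1) * mask   # running token count, zeroed at pads
    return incremental_indices.long() + padding_idx

ids = torch.as_tensor([[12, 31, 13, 1]])               # assume padding_idx == 1
print(fairseq_style_position_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])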
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __SCREAMING_SNAKE_CASE =subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") __SCREAMING_SNAKE_CASE =( subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split() ) __SCREAMING_SNAKE_CASE ="|".join(sys.argv[1:]) __SCREAMING_SNAKE_CASE =re.compile(rf"^({joined_dirs}).*?\.py$") __SCREAMING_SNAKE_CASE =[x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
350
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
321
0
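# A minimal NumPy sketch of the average pooling performed by the `pooling` method in the
# CNN above, assuming square feature maps whose side is divisible by the pool size; the
# function name is illustrative.
import numpy as np

def average_pool(feature_map: np.ndarray, size_pooling: int) -> np.ndarray:
    out = feature_map.shape[0] // size_pooling
    pooled = np.empty((out, out))
    for i in range(out):
        for j in range(out):
            window = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            pooled[i, j] = window.mean()  # swap in window.max() for max pooling
    return pooled

print(average_pool(np.arange(16.0).reshape(4, 4), 2))
# [[ 2.5  4.5]
#  [10.5 12.5]]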
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): lowercase_ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('Quantized models are not supported.' ) lowercase_ : List[Any] = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , A__ ) if matches: lowercase_ : List[str] = float(matches[1] ) lowercase_ : List[str] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowercase_ : int = 10_01 lowercase_ : Optional[Any] = """imagenet-1k-id2label.json""" lowercase_ : Optional[int] = """huggingface/label-files""" lowercase_ : Tuple = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) lowercase_ : int = {int(A__ ) + 1: v for k, v in idalabel.items()} lowercase_ : str = """background""" lowercase_ : Optional[int] = idalabel lowercase_ : int = {v: k for k, v in idalabel.items()} return config def lowercase__( ): lowercase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ : str = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : Optional[int] = get_mobilenet_va_config(A__ ) # Load 🤗 model lowercase_ : Optional[int] = MobileNetVaForImageClassification(A__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A__ , A__ , A__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowercase_ : Dict = MobileNetVaImageProcessor( crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , ) lowercase_ : List[str] = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase_ : Tuple = model(**A__ ) lowercase_ : Union[str, Any] = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": lowercase_ : Any = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": lowercase_ : str = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: lowercase_ : Optional[int] = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , A__ , atol=1E-4 ) Path(A__ ).mkdir(exist_ok=A__ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A__ ) if push_to_hub: print('Pushing to the hub...' ) lowercase_ : List[str] = """google/""" + model_name image_processor.push_to_hub(A__ ) model.push_to_hub(A__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
351
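# A hedged usage sketch: once converted (and assuming the published checkpoint id
# google/mobilenet_v1_1.0_224), the model loads like any transformers classifier and
# emits the 1001-class logits asserted above.
import numpy as np
import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in image
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors='pt')).logits
print(logits.shape)  # torch.Size([1, 1001])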
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
0
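# A minimal sketch of the benchmark API these tests drive, reduced to one tiny model,
# one batch size and one sequence length (multi_process disabled, as in the tests).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=['sshleifer/tiny-gpt2'],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)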
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[str] | None = None , __SCREAMING_SNAKE_CASE : dict[str, float] | None = None , __SCREAMING_SNAKE_CASE : bool = False , ): """simple docstring""" lowercase_ : Tuple = cipher_alphabet or [chr(lowerCAmelCase__ ) for i in range(97 , 1_23 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowercase_ : int = { '''a''': 0.0_8497, '''b''': 0.0_1492, '''c''': 0.0_2202, '''d''': 0.0_4253, '''e''': 0.1_1162, '''f''': 0.0_2228, '''g''': 0.0_2015, '''h''': 0.0_6094, '''i''': 0.0_7546, '''j''': 0.0_0153, '''k''': 0.0_1292, '''l''': 0.0_4025, '''m''': 0.0_2406, '''n''': 0.0_6749, '''o''': 0.0_7507, '''p''': 0.0_1929, '''q''': 0.0_0095, '''r''': 0.0_7587, '''s''': 0.0_6327, '''t''': 0.0_9356, '''u''': 0.0_2758, '''v''': 0.0_0978, '''w''': 0.0_2560, '''x''': 0.0_0150, '''y''': 0.0_1994, '''z''': 0.0_0077, } else: # Custom frequencies dictionary lowercase_ : Union[str, Any] = frequencies_dict if not case_sensitive: lowercase_ : Any = ciphertext.lower() # Chi squared statistic values lowercase_ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(lowerCAmelCase__ ) ): lowercase_ : Tuple = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowercase_ : Optional[int] = (alphabet_letters.index(letter.lower() ) - shift) % len( lowerCAmelCase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowercase_ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowercase_ : Optional[int] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowercase_ : Tuple = decrypted_with_shift.lower().count(lowerCAmelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowercase_ : Any = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase_ : Optional[int] = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowercase_ : Optional[Any] = decrypted_with_shift.count(lowerCAmelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowercase_ : Dict = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase_ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowercase_ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(__SCREAMING_SNAKE_CASE : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowercase_ : int = min( lowerCAmelCase__ , key=lowerCAmelCase__ , ) # Get all the data from the most likely 
cipher (key, decoded message) ( lowercase_ ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
352
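# A worked instance of the per-letter statistic accumulated above, using the table's
# frequency for 'e'. Note the expectation follows the code's freq * occurrences
# convention rather than freq * message_length.
text = 'the quick brown fox'
freq_e = 0.11162                       # English frequency of 'e' from the table above
occurrences = text.count('e')          # 1
expected = freq_e * occurrences        # 0.11162
chi_e = (occurrences - expected) ** 2 / expected
print(round(chi_e, 2))                 # 7.07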
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
321
0
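# A hedged usage sketch, assuming this is the EnCodec-style extractor exposed as
# transformers.EncodecFeatureExtractor (constructed directly rather than from a checkpoint).
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
audio = np.zeros(24000, dtype=np.float32)  # one second of mono silence
inputs = extractor(raw_audio=audio, sampling_rate=24000, return_tensors='pt')
print(inputs['input_values'].shape)  # (batch, channels, time)
print(inputs['padding_mask'].shape)  # (batch, time)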
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCamelCase ( lowercase_ ): """simple docstring""" lowercase = "WhisperFeatureExtractor" lowercase = "WhisperTokenizer" def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' super().__init__(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[str] = self.feature_extractor lowercase_ : Optional[int] = False def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ) -> List[Any]: '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=__UpperCamelCase ,language=__UpperCamelCase ,no_timestamps=__UpperCamelCase ) def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Optional[int] = kwargs.pop('audio' ,__UpperCamelCase ) lowercase_ : List[Any] = kwargs.pop('sampling_rate' ,__UpperCamelCase ) lowercase_ : Dict = kwargs.pop('text' ,__UpperCamelCase ) if len(__UpperCamelCase ) > 0: lowercase_ : str = args[0] lowercase_ : Dict = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: lowercase_ : Union[str, Any] = self.feature_extractor(__UpperCamelCase ,*__UpperCamelCase ,sampling_rate=__UpperCamelCase ,**__UpperCamelCase ) if text is not None: lowercase_ : List[Any] = self.tokenizer(__UpperCamelCase ,**__UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: lowercase_ : List[Any] = encodings['input_ids'] return inputs def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Any: '''simple docstring''' return self.tokenizer.decode(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase="np" ) -> Any: '''simple docstring''' return self.tokenizer.get_prompt_ids(__UpperCamelCase ,return_tensors=__UpperCamelCase )
353
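# A hedged usage sketch of the processor above, assuming the public openai/whisper-tiny
# checkpoint: audio goes through the feature extractor, text through the tokenizer, and
# the token ids land under 'labels'.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
audio = np.zeros(16000, dtype=np.float32)  # one second at 16 kHz
inputs = processor(audio=audio, sampling_rate=16000, text='hello world')
print(sorted(inputs.keys()))  # ['input_features', 'labels']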
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
0
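# A short sketch of what the lazy import structure above buys: nothing MRA-specific is
# imported at `import transformers` time; attribute access resolves the submodule on demand.
import transformers

config = transformers.MraConfig()  # pulls in configuration_mra lazily
print(config.model_type)           # 'mra'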
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __SCREAMING_SNAKE_CASE =datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): lowercase = None def lowercase__( __SCREAMING_SNAKE_CASE : "pyspark.sql.DataFrame" , __SCREAMING_SNAKE_CASE : List[int] , ): import pyspark def generate_fn(): lowercase_ : Tuple = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: lowercase_ : Optional[Any] = df_with_partition_id.select('*' ).where(F'''part_id = {partition_id}''' ).drop('part_id' ) lowercase_ : List[str] = partition_df.collect() lowercase_ : Optional[int] = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=None ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = df lowercase_ : str = partition_order or range(self.df.rdd.getNumPartitions() ) lowercase_ : Optional[Any] = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ) -> int: '''simple docstring''' yield from self.generate_examples_fn() def _UpperCAmelCase ( self ,__UpperCamelCase ) -> "SparkExamplesIterable": '''simple docstring''' lowercase_ : List[Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(a__ ) return SparkExamplesIterable(self.df ,partition_order=a__ ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> "SparkExamplesIterable": '''simple docstring''' lowercase_ : List[str] = self.split_shard_indices_by_worker(a__ ,a__ ) return SparkExamplesIterable(self.df ,partition_order=a__ ) @property def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): lowercase = SparkConfig def __init__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' import pyspark lowercase_ : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate() lowercase_ : int = df lowercase_ : Tuple = working_dir super().__init__( cache_dir=a__ ,config_name=str(self.df.semanticHash() ) ,**a__ ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' def create_cache_and_write_probe(__UpperCamelCase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=a__ ) lowercase_ : List[Any] = os.path.join(self._cache_dir ,'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(a__ ,'a' ) return [probe_file] if self._spark.conf.get('spark.master' ,'' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: lowercase_ : Optional[int] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(a__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' import pyspark def get_arrow_batch_size(__UpperCamelCase ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) lowercase_ : List[str] = self.df.count() lowercase_ : Any = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowercase_ : List[Any] = ( self.df.limit(a__ ) .repartition(1 ) .mapInArrow(a__ ,'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowercase_ : List[str] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowercase_ : Union[str, Any] = min(a__ ,int(approx_total_size / max_shard_size ) ) lowercase_ : int = self.df.repartition(a__ ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: '''simple docstring''' import pyspark lowercase_ : str = ParquetWriter if file_format == 'parquet' else ArrowWriter lowercase_ : List[str] = os.path.join(self._working_dir ,os.path.basename(a__ ) ) if self._working_dir else fpath lowercase_ : List[str] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowercase_ : Optional[int] = self.config.features lowercase_ : int = self._writer_batch_size lowercase_ : int = self._fs.storage_options def write_arrow(__UpperCamelCase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowercase_ : Tuple = pyspark.TaskContext().taskAttemptId() lowercase_ : Any = next(a__ ,a__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) lowercase_ : Union[str, Any] = 0 lowercase_ : Tuple = writer_class( features=a__ ,path=working_fpath.replace('SSSSS' ,f'''{shard_id:05d}''' ).replace('TTTTT' ,f'''{task_id:05d}''' ) ,writer_batch_size=a__ ,storage_options=a__ ,embed_local_files=a__ ,) lowercase_ : Union[str, Any] = pa.Table.from_batches([first_batch] ) writer.write_table(a__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowercase_ , lowercase_ : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) shard_id += 1 lowercase_ : List[str] = writer_class( features=writer._features ,path=working_fpath.replace('SSSSS' ,f'''{shard_id:05d}''' ).replace('TTTTT' ,f'''{task_id:05d}''' ) ,writer_batch_size=a__ ,storage_options=a__ ,embed_local_files=a__ ,) lowercase_ : Any = pa.Table.from_batches([batch] ) writer.write_table(a__ ) if writer._num_bytes > 0: lowercase_ , lowercase_ : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=['task_id', 'num_examples', 'num_bytes'] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(a__ ) ): lowercase_ : Optional[int] = os.path.join(os.path.dirname(a__ ) ,os.path.basename(a__ ) ) shutil.move(a__ ,a__ ) lowercase_ : List[str] = ( self.df.mapInArrow(a__ ,'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) ,pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) ,pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) ,pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "arrow" ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' self._validate_cache_dir() lowercase_ : Dict = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(a__ ) lowercase_ : Optional[Any] = not is_remote_filesystem(self._fs ) lowercase_ : Optional[Any] = os.path.join if is_local else posixpath.join lowercase_ : List[Any] = '-TTTTT-SSSSS-of-NNNNN' lowercase_ : Any = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowercase_ : int = path_join(self._output_dir ,a__ ) lowercase_ : List[Any] = 0 lowercase_ : int = 0 lowercase_ : Optional[Any] = 0 lowercase_ : Union[str, Any] = [] lowercase_ : Union[str, Any] = [] for task_id, content in self._prepare_split_single(a__ ,a__ ,a__ ): ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : List[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(a__ ) lowercase_ : Optional[Any] = total_num_examples lowercase_ : int = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: lowercase_ : Optional[int] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the 
SparkContext. lowercase_ : str = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,): rename( a__ ,fpath.replace('SSSSS' ,f'''{shard_id:05d}''' ).replace('TTTTT' ,f'''{task_id:05d}''' ) ,fpath.replace('TTTTT-SSSSS' ,f'''{global_shard_id:05d}''' ).replace('NNNNN' ,f'''{total_shards:05d}''' ) ,) lowercase_ : Dict = [] lowercase_ : Union[str, Any] = 0 for i in range(len(a__ ) ): lowercase_ , lowercase_ : Any = task_id_and_num_shards[i] for shard_id in range(a__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(a__ ,len(a__ ) ).map(lambda __UpperCamelCase : _rename_shard(*a__ ) ).collect() else: # don't use any pattern lowercase_ : List[str] = 0 lowercase_ : Tuple = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' ,f'''{shard_id:05d}''' ).replace('TTTTT' ,f'''{task_id:05d}''' ) ,fpath.replace(a__ ,'' ) ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,) -> SparkExamplesIterable: '''simple docstring''' return SparkExamplesIterable(self.df )
354
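# A hedged end-to-end sketch of the public entry point this builder backs,
# datasets.Dataset.from_spark, on a local Spark master (so the multi-node cache_dir
# probe above is skipped).
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master('local[2]').getOrCreate()
df = spark.createDataFrame([(i, str(i)) for i in range(10)], ['id', 'text'])
ds = Dataset.from_spark(df)
print(ds[0])  # e.g. {'id': 0, 'text': '0'}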
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
0
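# A small sketch of the version helpers used above; the requirement strings here are
# illustrative, not the pinned values from dependency_versions_table.
from transformers.utils.versions import require_version

require_version('tqdm>=4.27')                           # raises if tqdm is absent or too old
require_version('numpy>=1.17', 'pip install -U numpy')  # second argument is an error hint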
"""simple docstring""" import unittest import numpy as np def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] = None , ): lowercase_ : Optional[int] = np.shape(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = np.shape(__SCREAMING_SNAKE_CASE ) lowercase_ : str = np.shape(__SCREAMING_SNAKE_CASE ) if shape_a[0] != shape_b[0]: lowercase_ : Union[str, Any] = ( 'Expected the same number of rows for A and B. ' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) if shape_b[1] != shape_c[1]: lowercase_ : int = ( 'Expected the same number of columns for B and C. ' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) lowercase_ : int = pseudo_inv if a_inv is None: try: lowercase_ : str = np.linalg.inv(__SCREAMING_SNAKE_CASE ) except np.linalg.LinAlgError: raise ValueError( 'Input matrix A is not invertible. Cannot compute Schur complement.' ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> None: '''simple docstring''' lowercase_ : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Dict = np.array([[2, 1], [6, 3]] ) lowercase_ : Optional[int] = schur_complement(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ) lowercase_ : Dict = np.block([[a, b], [b.T, c]] ) lowercase_ : str = np.linalg.det(UpperCamelCase_ ) lowercase_ : Optional[Any] = np.linalg.det(UpperCamelCase_ ) lowercase_ : int = np.linalg.det(UpperCamelCase_ ) self.assertAlmostEqual(UpperCamelCase_ ,det_a * det_s ) def _UpperCAmelCase ( self ) -> None: '''simple docstring''' lowercase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Tuple = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCamelCase_ ): schur_complement(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ) def _UpperCAmelCase ( self ) -> None: '''simple docstring''' lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) lowercase_ : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) lowercase_ : Any = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCamelCase_ ): schur_complement(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
355
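# A worked check of the determinant identity the first test above relies on:
# det([[A, B], [B.T, C]]) = det(A) * det(C - B.T @ inv(A) @ B); values are illustrative.
import numpy as np

a = np.array([[1.0, 2.0], [2.0, 1.0]])
b = np.array([[1.0], [0.0]])
c = np.array([[3.0]])

s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A, here [[10/3]]
m = np.block([[a, b], [b.T, c]])
print(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))  # True (both -10)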
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
0
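The record above exercises the Schur complement identity det([[A, B], [Bᵀ, C]]) = det(A) · det(S) with S = C − Bᵀ A⁻¹ B. As a minimal standalone check, assuming only numpy and reusing the test's sample matrices (none of the record's helper names are used here):

import numpy as np

# Same sample matrices as the record's first test; A is invertible (det(A) = -3).
a = np.array([[1.0, 2.0, 1.0], [2.0, 1.0, 2.0], [3.0, 2.0, 4.0]])
b = np.array([[0.0, 3.0], [3.0, 0.0], [2.0, 3.0]])
c = np.array([[2.0, 1.0], [6.0, 3.0]])

s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A
m = np.block([[a, b], [b.T, c]])    # the full block matrix
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))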
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : Any = b.T lowercase_ : List[Any] = np.sum(np.square(lowerCamelCase_ ) , axis=1 ) lowercase_ : Dict = np.sum(np.square(lowerCamelCase_ ) , axis=0 ) lowercase_ : Optional[int] = np.matmul(lowerCamelCase_ , lowerCamelCase_ ) lowercase_ : Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : Tuple = x.reshape(-1 , 3 ) lowercase_ : str = squared_euclidean_distance(lowerCamelCase_ , lowerCamelCase_ ) return np.argmin(lowerCamelCase_ , axis=1 ) class UpperCamelCase ( lowerCamelCase__ ): lowercase = ['pixel_values'] def __init__( self ,__UpperCamelCase = None ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BILINEAR ,__UpperCamelCase = True ,__UpperCamelCase = True ,**__UpperCamelCase ,) -> None: '''simple docstring''' super().__init__(**__lowerCamelCase ) lowercase_ : Optional[Any] = size if size is not None else {'''height''': 256, '''width''': 256} lowercase_ : Optional[Any] = get_size_dict(__lowerCamelCase ) lowercase_ : List[str] = np.array(__lowerCamelCase ) if clusters is not None else None lowercase_ : Any = do_resize lowercase_ : List[str] = size lowercase_ : str = resample lowercase_ : List[str] = do_normalize lowercase_ : Union[str, Any] = do_color_quantize def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = PILImageResampling.BILINEAR ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray: '''simple docstring''' lowercase_ : Union[str, Any] = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dictionary must contain both height and width keys. 
Got {size.keys()}''' ) return resize( __lowerCamelCase ,size=(size['height'], size['width']) ,resample=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,) -> np.ndarray: '''simple docstring''' lowercase_ : int = rescale(image=__lowerCamelCase ,scale=1 / 127.5 ,data_format=__lowerCamelCase ) lowercase_ : Union[str, Any] = image - 1 return image def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> PIL.Image.Image: '''simple docstring''' lowercase_ : Any = do_resize if do_resize is not None else self.do_resize lowercase_ : int = size if size is not None else self.size lowercase_ : str = get_size_dict(__lowerCamelCase ) lowercase_ : Dict = resample if resample is not None else self.resample lowercase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize lowercase_ : Any = clusters if clusters is not None else self.clusters lowercase_ : Tuple = np.array(__lowerCamelCase ) lowercase_ : str = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_color_quantize and clusters is None: raise ValueError('Clusters must be specified if do_color_quantize is True.' ) # All transformations expect numpy arrays. lowercase_ : int = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: lowercase_ : Optional[int] = [self.resize(image=__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ) for image in images] if do_normalize: lowercase_ : Optional[Any] = [self.normalize(image=__lowerCamelCase ) for image in images] if do_color_quantize: lowercase_ : Optional[Any] = [to_channel_dimension_format(__lowerCamelCase ,ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) lowercase_ : Union[str, Any] = np.array(__lowerCamelCase ) lowercase_ : Optional[int] = color_quantize(__lowerCamelCase ,__lowerCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) lowercase_ : Optional[Any] = images.shape[0] lowercase_ : List[str] = images.reshape(__lowerCamelCase ,-1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. lowercase_ : Union[str, Any] = list(__lowerCamelCase ) else: lowercase_ : List[Any] = [to_channel_dimension_format(__lowerCamelCase ,__lowerCamelCase ) for image in images] lowercase_ : Tuple = {'''input_ids''': images} return BatchFeature(data=__lowerCamelCase ,tensor_type=__lowerCamelCase )
356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
0
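The cipher table in the record above is a 26-unique-code variant of the Baconian cipher (note the irregular "j" -> "BBBAA" and "v" -> "BBBAB"). It is not interchangeable with the classic 24-code cipher, in which i/j and u/v share codes. For contrast, a self-contained sketch of the classic table (names here are illustrative, not from the record):

# Classic 24-letter Baconian alphabet: i/j and u/v collapse to one code each.
ALPHABET = "abcdefghiklmnopqrstuwxyz"

def classic_baconian(letter: str) -> str:
    normalised = letter.lower().replace("j", "i").replace("v", "u")
    index = ALPHABET.index(normalised)
    return format(index, "05b").replace("0", "A").replace("1", "B")

print("".join(classic_baconian(ch) for ch in "bacon"))
# AAAABAAAAAAAABAABBABABBAA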
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.json"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } __SCREAMING_SNAKE_CASE ={"mgp-str": 27} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self ,__UpperCamelCase ,__UpperCamelCase="[GO]" ,__UpperCamelCase="[GO]" ,__UpperCamelCase="[s]" ,__UpperCamelCase="[GO]" ,**__UpperCamelCase ) -> str: '''simple docstring''' super().__init__( unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,**_A ,) with open(_A ,encoding='utf-8' ) as vocab_handle: lowercase_ : Optional[int] = json.load(_A ) lowercase_ : Union[str, Any] = {v: k for k, v in self.vocab.items()} @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return len(self.vocab ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return dict(self.vocab ,**self.added_tokens_encoder ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = [] for s in text: char_tokens.extend(_A ) return char_tokens def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return self.vocab.get(_A ,self.vocab.get(self.unk_token ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return self.decoder.get(_A ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_A ): logger.error('Vocabulary path ({}) should be a directory'.format(_A ) ) return lowercase_ : Optional[Any] = os.path.join( _A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(_A ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.vocab ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' ) return (vocab_file,)
357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
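For the sample input at the bottom of the record (target 5, array [1, 2, 5]), the bottom-up table fills as dp = [1, 1, 2, 3, 5, 9], so all three implementations return 9 ordered combinations. A quick standalone trace (illustrative names):

dp = [1, 0, 0, 0, 0, 0]  # dp[0] = 1: one way to reach 0, the empty combination
for i in range(1, 6):
    dp[i] = sum(dp[i - item] for item in (1, 2, 5) if i - item >= 0)
print(dp)  # [1, 1, 2, 3, 5, 9]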
"""simple docstring""" import logging from transformers import PretrainedConfig __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) __SCREAMING_SNAKE_CASE ={ 'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json', } class UpperCamelCase ( lowerCamelCase__ ): lowercase = 'bertabs' def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=512 ,__UpperCamelCase=6 ,__UpperCamelCase=512 ,__UpperCamelCase=8 ,__UpperCamelCase=512 ,__UpperCamelCase=0.2 ,__UpperCamelCase=6 ,__UpperCamelCase=768 ,__UpperCamelCase=8 ,__UpperCamelCase=2048 ,__UpperCamelCase=0.2 ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' super().__init__(**__snake_case ) lowercase_ : List[str] = vocab_size lowercase_ : List[str] = max_pos lowercase_ : str = enc_layers lowercase_ : Optional[int] = enc_hidden_size lowercase_ : Optional[int] = enc_heads lowercase_ : List[str] = enc_ff_size lowercase_ : Optional[Any] = enc_dropout lowercase_ : Dict = dec_layers lowercase_ : List[str] = dec_hidden_size lowercase_ : str = dec_heads lowercase_ : str = dec_ff_size lowercase_ : Any = dec_dropout
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
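The disjoint-set class above unions by rank and additionally tracks per-set element counts. A compact reference sketch of the same structure using union by size with path halving, a close cousin of the record's union by rank (illustrative names, no set-count bookkeeping):

class DisjointSet:
    def __init__(self, n: int) -> None:
        self.parent = list(range(n))
        self.size = [1] * n

    def find(self, x: int) -> int:
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]  # path halving
            x = self.parent[x]
        return x

    def union(self, a: int, b: int) -> bool:
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False
        if self.size[ra] < self.size[rb]:
            ra, rb = rb, ra  # attach the smaller tree under the larger
        self.parent[rb] = ra
        self.size[ra] += self.size[rb]
        return True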
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase = '''xlnet''' lowercase = ['''mems'''] lowercase = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self ,__UpperCamelCase=3_2000 ,__UpperCamelCase=1024 ,__UpperCamelCase=24 ,__UpperCamelCase=16 ,__UpperCamelCase=4096 ,__UpperCamelCase="gelu" ,__UpperCamelCase=True ,__UpperCamelCase="bi" ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=-1 ,__UpperCamelCase=False ,__UpperCamelCase="last" ,__UpperCamelCase=True ,__UpperCamelCase="tanh" ,__UpperCamelCase=0.1 ,__UpperCamelCase=5 ,__UpperCamelCase=5 ,__UpperCamelCase=5 ,__UpperCamelCase=1 ,__UpperCamelCase=2 ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = vocab_size lowercase_ : int = d_model lowercase_ : Dict = n_layer lowercase_ : List[str] = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) lowercase_ : Optional[int] = d_model // n_head lowercase_ : Union[str, Any] = ff_activation lowercase_ : Optional[int] = d_inner lowercase_ : str = untie_r lowercase_ : Optional[Any] = attn_type lowercase_ : int = initializer_range lowercase_ : Optional[Any] = layer_norm_eps lowercase_ : Optional[int] = dropout lowercase_ : str = mem_len lowercase_ : Optional[Any] = reuse_len lowercase_ : Tuple = bi_data lowercase_ : Tuple = clamp_len lowercase_ : Any = same_length lowercase_ : Union[str, Any] = summary_type lowercase_ : Optional[int] = summary_use_proj lowercase_ : int = summary_activation lowercase_ : str = summary_last_dropout lowercase_ : Optional[Any] = start_n_top lowercase_ : Union[str, Any] = end_n_top lowercase_ : Tuple = bos_token_id lowercase_ : Optional[Any] = pad_token_id lowercase_ : Union[str, Any] = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' ,A__ ,) lowercase_ : Optional[int] = kwargs['use_cache'] lowercase_ : Dict = use_mems_eval lowercase_ : Tuple = use_mems_train super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__ ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
0
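The XLNet config in the record above rejects any d_model that is not a multiple of n_head and derives the per-head width from the two. With the defaults shown (d_model=1024, n_head=16) the arithmetic is:

d_model, n_head = 1024, 16    # defaults from the config above
assert d_model % n_head == 0  # otherwise the config raises ValueError
print(d_model // n_head)      # d_head = 64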
"""simple docstring""" import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class UpperCamelCase ( __a ): def __init__( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = data def __iter__( self ) -> List[Any]: '''simple docstring''' for element in self.data: yield element def lowercase__( __SCREAMING_SNAKE_CASE : Any=True ): lowercase_ : Tuple = Accelerator(even_batches=__SCREAMING_SNAKE_CASE ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any = False ): if iterable: lowercase_ : Tuple = DummyIterableDataset(torch.as_tensor(range(__SCREAMING_SNAKE_CASE ) ) ) else: lowercase_ : Tuple = TensorDataset(torch.as_tensor(range(__SCREAMING_SNAKE_CASE ) ) ) lowercase_ : Union[str, Any] = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE ) return dl def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , ): lowercase_ : Dict = create_dataloader(accelerator=__SCREAMING_SNAKE_CASE , dataset_size=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def lowercase__( ): lowercase_ : Optional[int] = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def lowercase__( ): lowercase_ : List[Any] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE ) verify_dataloader_batch_sizes( __SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def lowercase__( ): lowercase_ : Dict = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = torch.nn.Linear(1 , 1 ) lowercase_ : Union[str, Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) lowercase_ : List[str] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = ddp_model(batch[0].float() ) lowercase_ : int = output.sum() loss.backward() batch_idxs.append(__SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 
1] elif accelerator.process_index == 1: assert batch_idxs == [0] def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE ) assert "only supported for multi-GPU" in str(w[-1].message ) def lowercase__( ): lowercase_ : List[Any] = True lowercase_ : Optional[int] = False lowercase_ : Tuple = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = torch.nn.Linear(1 , 1 ) lowercase_ : str = accelerator.prepare(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) lowercase_ : List[Any] = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = train_dl.batch_sampler.even_batches lowercase_ : Optional[int] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def lowercase__( ): lowercase_ : int = True lowercase_ : Union[str, Any] = False lowercase_ : List[str] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = torch.nn.Linear(1 , 1 ) lowercase_ : Optional[int] = accelerator.prepare(__SCREAMING_SNAKE_CASE ) create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings('ignore' ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ): lowercase_ : int = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def lowercase__( ): lowercase_ : List[str] = create_accelerator() lowercase_ : Optional[int] = torch.nn.Linear(1 , 1 ) lowercase_ : int = accelerator.prepare(__SCREAMING_SNAKE_CASE ) create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE ) with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ): pass assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE ) assert "only supported for map-style datasets" in str(w[-1].message ) def lowercase__( ): lowercase_ : Optional[int] = create_accelerator() accelerator.print('Test that even_batches variable ensures uniform batches across processes' ) test_default_ensures_even_batch_sizes() accelerator.print('Run tests with even_batches disabled' ) test_can_disable_even_batches() accelerator.print('Test joining uneven inputs' ) test_can_join_uneven_inputs() accelerator.print('Test overriding even_batches when joining uneven inputs' ) test_join_can_override_even_batches() accelerator.print('Test overriding even_batches for mixed dataloader types' ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' ) 
test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print('Test join with non DDP distributed raises warning' ) lowercase_ : int = accelerator.state.distributed_type lowercase_ : Optional[Any] = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = original_state if __name__ == "__main__": main()
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
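The accelerate test script above asserts per-process batch counts; the underlying idea can be illustrated without GPUs. The following plain-Python sketch is not accelerate's API, just the sharding arithmetic the assertions encode: with 3 samples, batch size 1 and 2 processes, even batches pad by recycling samples so both ranks see 2 batches, while disabling them leaves rank 1 with a single batch.

def shard(indices: list[int], rank: int, world_size: int, even_batches: bool) -> list[int]:
    if even_batches and len(indices) % world_size:
        pad = world_size - len(indices) % world_size
        indices = indices + indices[:pad]  # recycle samples from the start
    return indices[rank::world_size]

print(shard([0, 1, 2], 0, 2, True), shard([0, 1, 2], 1, 2, True))    # [0, 2] [1, 0]
print(shard([0, 1, 2], 0, 2, False), shard([0, 1, 2], 1, 2, False))  # [0, 2] [1]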
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ): # noqa: E741 while r - l > 1: lowercase_ : Optional[Any] = (l + r) // 2 if v[m] >= key: lowercase_ : int = m else: lowercase_ : str = m # noqa: E741 return r def lowercase__( __SCREAMING_SNAKE_CASE : list[int] ): if len(__SCREAMING_SNAKE_CASE ) == 0: return 0 lowercase_ : Optional[int] = [0] * len(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = 1 lowercase_ : Any = v[0] for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): if v[i] < tail[0]: lowercase_ : str = v[i] elif v[i] > tail[length - 1]: lowercase_ : str = v[i] length += 1 else: lowercase_ : Optional[Any] = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
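The longest-increasing-subsequence routine at the top of this record keeps a "tails" array and binary-searches it for the slot each element should occupy. The standard library expresses the same O(n log n) idea directly; a strictly-increasing sketch with illustrative names:

from bisect import bisect_left

def lis_length(v: list[int]) -> int:
    tails: list[int] = []
    for x in v:
        i = bisect_left(tails, x)  # leftmost slot whose tail is >= x
        if i == len(tails):
            tails.append(x)        # x extends the longest subsequence found
        else:
            tails[i] = x           # x lowers the tail of the length-(i+1) subsequence
    return len(tails)

print(lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6, e.g. 2, 3, 7, 8, 10, 13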
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
0
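The hand-rolled min-heap above implements decrease_key by mutating the node and re-sifting. The standard library's heapq has no decrease-key, and the usual stdlib pattern is lazy invalidation of stale entries; a self-contained sketch of that pattern (names illustrative; this mirrors the recipe in the Python docs, not the record's class):

import heapq
import itertools

heap: list = []
entries: dict = {}
counter = itertools.count()

def push(name: str, priority: int) -> None:
    if name in entries:
        entries[name][-1] = None  # invalidate the stale entry in place
    entry = [priority, next(counter), name]
    entries[name] = entry
    heapq.heappush(heap, entry)

def pop() -> str:
    while heap:
        _, _, name = heapq.heappop(heap)
        if name is not None:      # skip entries that were invalidated
            del entries[name]
            return name
    raise KeyError("pop from an empty priority queue")

push("B", 6)
push("R", -1)
push("B", -17)  # effectively decrease_key(B, -17)
print(pop())    # B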
"""simple docstring""" import heapq def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : List[str] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(UpperCAmelCase__ , [-1 * len(UpperCAmelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices lowercase_ : Dict = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices lowercase_ : Any = heapq.heappop(UpperCAmelCase__ )[1][0] chosen_vertices.add(UpperCAmelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: lowercase_ : Any = elem[1][1].index(UpperCAmelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(UpperCAmelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=2 ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=10 ,__UpperCamelCase=3 ,__UpperCamelCase=32 * 4 ,__UpperCamelCase=32 * 6 ,__UpperCamelCase=4 ,__UpperCamelCase=32 ,) -> str: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Any = batch_size lowercase_ : Any = is_training lowercase_ : Tuple = use_auxiliary_loss lowercase_ : int = num_queries lowercase_ : Optional[Any] = num_channels lowercase_ : Optional[int] = min_size lowercase_ : Optional[int] = max_size lowercase_ : List[str] = num_labels lowercase_ : Optional[Any] = mask_feature_size def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _A ) lowercase_ : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_A ) lowercase_ : List[str] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_A ) > 0.5 ).float() lowercase_ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) ,device=_A ) > 0.5).long() lowercase_ : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = self.prepare_config_and_inputs() lowercase_ : Any = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = output.encoder_hidden_states lowercase_ : List[Any] = output.pixel_decoder_hidden_states lowercase_ : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_A ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_A ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_A ) ,config.decoder_config.decoder_layers ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> int: '''simple docstring''' with torch.no_grad(): lowercase_ : Optional[Any] = MaskFormerModel(config=_A ) model.to(_A ) model.eval() 
lowercase_ : str = model(pixel_values=_A ,pixel_mask=_A ) lowercase_ : Tuple = model(_A ,output_hidden_states=_A ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_A ,_A ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = MaskFormerForInstanceSegmentation(config=_A ) model.to(_A ) model.eval() def comm_check_on_output(__UpperCamelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowercase_ : Union[str, Any] = model(pixel_values=_A ,pixel_mask=_A ) lowercase_ : Tuple = model(_A ) comm_check_on_output(_A ) lowercase_ : List[str] = model( pixel_values=_A ,pixel_mask=_A ,mask_labels=_A ,class_labels=_A ) comm_check_on_output(_A ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class UpperCamelCase ( snake_case__ , snake_case__ , unittest.TestCase ): lowercase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowercase = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Tuple = MaskFormerModelTester(self ) lowercase_ : Any = ConfigTester(self ,config_class=_A ,has_text_modality=_A ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_A ,**_A ,output_hidden_states=_A ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A ) @unittest.skip(reason='MaskFormer does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='MaskFormer is not a generative model' ) def _UpperCAmelCase ( self ) -> 
Tuple: '''simple docstring''' pass @unittest.skip(reason='MaskFormer does not use token embeddings' ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(_A ) lowercase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Optional[int] = [*signature.parameters.keys()] lowercase_ : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_A ) @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: lowercase_ : Optional[Any] = MaskFormerModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = (self.model_tester.min_size,) * 2 lowercase_ : Optional[Any] = { 'pixel_values': torch.randn((2, 3, *size) ,device=_A ), 'mask_labels': torch.randn((2, 10, *size) ,device=_A ), 'class_labels': torch.zeros(2 ,10 ,device=_A ).long(), } lowercase_ : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A ) lowercase_ : List[str] = model(**_A ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_A ,**_A ,output_hidden_states=_A ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(_A ).to(_A ) lowercase_ : Dict = model(**_A ,output_attentions=_A ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowercase_ : Optional[int] = self.all_model_classes[1] lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs() lowercase_ : Dict = model_class(_A ) model.to(_A ) model.train() lowercase_ : Any = model(_A ,mask_labels=_A ,class_labels=_A ).loss loss.backward() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Tuple = self.all_model_classes[1] lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() lowercase_ : List[Any] = True lowercase_ : List[Any] = True lowercase_ : List[Any] = model_class(_A ) model.to(_A ) model.train() lowercase_ : List[str] = model(_A ,mask_labels=_A ,class_labels=_A ) lowercase_ : Dict = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowercase_ : Optional[int] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 
2152), the original implementation don't lowercase_ : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowercase_ : Tuple = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_A ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE =1E-4 def lowercase__( ): lowercase_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' ) if is_vision_available() else None ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[str] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_A ) lowercase_ : List[Any] = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : List[Any] = image_processor(_A ,return_tensors='pt' ).to(_A ) lowercase_ : Optional[int] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_A ,(1, 3, 800, 1088) ) with torch.no_grad(): lowercase_ : List[Any] = model(**_A ) lowercase_ : Any = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_A ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_A ,atol=_A ) ) lowercase_ : Tuple = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_A ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_A ,atol=_A ) ) lowercase_ : Optional[int] = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_A ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_A ,atol=_A ) ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(_A ) .eval() ) lowercase_ : Optional[int] = self.default_image_processor lowercase_ : int = prepare_img() lowercase_ : Dict = image_processor(_A ,return_tensors='pt' ).to(_A ) lowercase_ : Optional[Any] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_A ,(1, 3, 800, 1088) ) with torch.no_grad(): lowercase_ : int = model(**_A ) # masks_queries_logits lowercase_ : List[str] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowercase_ : Union[str, Any] = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] lowercase_ : Optional[int] = torch.tensor(_A ).to(_A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_A ,atol=_A ) ) # class_queries_logits lowercase_ : str = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) 
lowercase_ : Any = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(_A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_A ,atol=_A ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : int = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' ) .to(_A ) .eval() ) lowercase_ : List[str] = self.default_image_processor lowercase_ : str = prepare_img() lowercase_ : Dict = image_processor(_A ,return_tensors='pt' ).to(_A ) lowercase_ : Optional[int] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_A ,(1, 3, 800, 1088) ) with torch.no_grad(): lowercase_ : str = model(**_A ) # masks_queries_logits lowercase_ : Tuple = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowercase_ : Optional[int] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] lowercase_ : List[str] = torch.tensor(_A ).to(_A ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_A ,atol=_A ) ) # class_queries_logits lowercase_ : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowercase_ : Tuple = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_A ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_A ,atol=_A ) ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' ) .to(_A ) .eval() ) lowercase_ : List[str] = self.default_image_processor lowercase_ : Any = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) lowercase_ : int = inputs['pixel_values'].to(_A ) lowercase_ : Tuple = [el.to(_A ) for el in inputs['mask_labels']] lowercase_ : List[Any] = [el.to(_A ) for el in inputs['class_labels']] with torch.no_grad(): lowercase_ : Union[str, Any] = model(**_A ) self.assertTrue(outputs.loss is not None )
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ): _enforce_args(snake_case__ , snake_case__ ) if n == 0: return 0 lowercase_ : Dict = float('-inf' ) for i in range(1 , n + 1 ): lowercase_ : List[str] = max( snake_case__ , prices[i - 1] + naive_cut_rod_recursive(n - i , snake_case__ ) ) return max_revue def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ): _enforce_args(snake_case__ , snake_case__ ) lowercase_ : Union[str, Any] = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(snake_case__ , snake_case__ , snake_case__ ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list ): if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: lowercase_ : Union[str, Any] = float('-inf' ) for i in range(1 , n + 1 ): lowercase_ : int = max( snake_case__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , snake_case__ , snake_case__ ) , ) lowercase_ : Optional[int] = max_revenue return max_rev[n] def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ): _enforce_args(snake_case__ , snake_case__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. lowercase_ : Dict = [float('-inf' ) for _ in range(n + 1 )] lowercase_ : Optional[int] = 0 for i in range(1 , n + 1 ): lowercase_ : Any = max_rev[i] for j in range(1 , i + 1 ): lowercase_ : int = max(snake_case__ , prices[j - 1] + max_rev[i - j] ) lowercase_ : Optional[Any] = max_revenue_i return max_rev[n] def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ): if n < 0: lowercase_ : Dict = F'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(snake_case__ ) if n > len(snake_case__ ): lowercase_ : Optional[int] = ( 'Each integral piece of rod must have a corresponding price. ' F'''Got n = {n} but length of prices = {len(snake_case__ )}''' ) raise ValueError(snake_case__ ) def lowercase__( ): lowercase_ : List[Any] = [6, 10, 12, 15, 20, 23] lowercase_ : Dict = len(snake_case__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. lowercase_ : Tuple = 36 lowercase_ : List[str] = top_down_cut_rod(snake_case__ , snake_case__ ) lowercase_ : Any = bottom_up_cut_rod(snake_case__ , snake_case__ ) lowercase_ : int = naive_cut_rod_recursive(snake_case__ , snake_case__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
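The tester above exercises BertGenerationEncoder and BertGenerationDecoder in isolation; in practice the pair is typically combined through EncoderDecoderModel. A sketch along the lines of the transformers documentation, assuming the bert-large-uncased checkpoint and BERT's [CLS]/[SEP] ids (101/102), none of which come from this test:

from transformers import BertGenerationDecoder, BertGenerationEncoder, BertTokenizer, EncoderDecoderModel

# reuse plain BERT weights; 101/102 are BERT's [CLS]/[SEP] token ids
encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
decoder = BertGenerationDecoder.from_pretrained(
    "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
)
bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)

tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
input_ids = tokenizer(
    "This is a long article to summarize", add_special_tokens=False, return_tensors="pt"
).input_ids
labels = tokenizer("This is a short summary", return_tensors="pt").input_ids
loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss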
321
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __SCREAMING_SNAKE_CASE ={ "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["ChineseCLIPFeatureExtractor"] __SCREAMING_SNAKE_CASE =["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
321
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: lowercase_ : Union[str, Any] = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) lowercase_ : Optional[Any] = MaskFormerConfig(backbone_config=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok lowercase_ : List[Any] = 8_47 lowercase_ : str = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok lowercase_ : List[str] = 1_50 lowercase_ : int = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok lowercase_ : Any = 1_71 lowercase_ : int = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO lowercase_ : List[str] = 1_33 lowercase_ : List[str] = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok lowercase_ : Optional[Any] = 19 lowercase_ : Optional[int] = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok lowercase_ : Dict = 65 lowercase_ : Optional[Any] = '''mapillary-vistas-id2label.json''' lowercase_ : List[Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : List[Any] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} return config def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: lowercase_ : int = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # 
Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) 
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: lowercase_ : Tuple = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : str = val def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: lowercase_ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowercase_ : Any = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowercase_ : str = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) lowercase_ : Any = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : Union[str, Any] = in_proj_weight[:dim, :] lowercase_ : Optional[Any] = in_proj_bias[: dim] lowercase_ : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] lowercase_ : Any = in_proj_bias[ dim : dim * 2 ] lowercase_ : Union[str, Any] = in_proj_weight[ -dim :, : ] lowercase_ : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> str: lowercase_ : Optional[int] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) lowercase_ : List[str] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) lowercase_ : Optional[Any] = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : int = in_proj_weight[: hidden_size, :] lowercase_ : List[str] = in_proj_bias[:config.hidden_size] lowercase_ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] lowercase_ : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2] lowercase_ : Tuple = in_proj_weight[-hidden_size :, :] lowercase_ : List[Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) lowercase_ : int = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) lowercase_ : Any = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase_ : List[str] = in_proj_weight[: hidden_size, :] lowercase_ : List[Any] = in_proj_bias[:config.hidden_size] lowercase_ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] lowercase_ : Tuple = in_proj_bias[hidden_size : hidden_size * 2] lowercase_ : str = in_proj_weight[-hidden_size :, :] lowercase_ : List[str] = in_proj_bias[-hidden_size :] # fmt: on def lowercase__( ) -> Dict: lowercase_ : 
Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int = False ) -> Optional[int]: lowercase_ : Optional[int] = get_maskformer_config(__SCREAMING_SNAKE_CASE ) # load original state_dict with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: lowercase_ : Optional[int] = pickle.load(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys lowercase_ : Optional[int] = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_swin_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config ) read_in_decoder_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # update to torch tensors for key, value in state_dict.items(): lowercase_ : int = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # load 🤗 model lowercase_ : Union[str, Any] = MaskFormerForInstanceSegmentation(__SCREAMING_SNAKE_CASE ) model.eval() for name, param in model.named_parameters(): print(__SCREAMING_SNAKE_CASE , param.shape ) lowercase_ : List[str] = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__SCREAMING_SNAKE_CASE ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results lowercase_ : Tuple = prepare_img() if "vistas" in model_name: lowercase_ : Dict = 65 elif "cityscapes" in model_name: lowercase_ : Dict = 6_55_35 else: lowercase_ : Tuple = 2_55 lowercase_ : str = True if '''ade''' in model_name else False lowercase_ : Dict = MaskFormerImageProcessor(ignore_index=__SCREAMING_SNAKE_CASE , reduce_labels=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE ) print('Logits:' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": lowercase_ : List[str] = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: print('Pushing model and image processor to the hub...' 
) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you\'d like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
367
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
0
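Aside: the conversion script in the row above follows the usual rename-then-load pattern for porting a state dict. A minimal sketch of that pattern, assuming nothing beyond plain Python; the rename rules and key names are illustrative, not taken from the MaskFormer script.

def rename_state_dict(sd, rules):
    # Apply every (old, new) substring rule to each key; values are untouched.
    out = {}
    for key, value in sd.items():
        for old, new in rules:
            key = key.replace(old, new)
        out[key] = value
    return out

state_dict = {"blocks.0.attn.weight": 0.0}  # toy stand-in for real tensors
renamed = rename_state_dict(state_dict, [("blocks", "encoder.layer"), ("attn", "attention")])
assert "encoder.layer.0.attention.weight" in renamed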
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : Optional[Any] = WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ ) lowercase_ : int = downstream_dict['projector.weight'] lowercase_ : Optional[int] = downstream_dict['projector.bias'] lowercase_ : Any = downstream_dict['model.post_net.linear.weight'] lowercase_ : Tuple = downstream_dict['model.post_net.linear.bias'] return model def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : str = WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ ) lowercase_ : List[str] = downstream_dict['model.linear.weight'] lowercase_ : Union[str, Any] = downstream_dict['model.linear.bias'] return model def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : Optional[Any] = WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ ) lowercase_ : str = downstream_dict['connector.weight'] lowercase_ : Dict = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowercase_ : Optional[int] = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] lowercase_ : List[str] = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] lowercase_ : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] lowercase_ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] lowercase_ : int = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] lowercase_ : int = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] lowercase_ : Optional[int] = downstream_dict['objective.W'] return model @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : str = torch.load(snake_case_ , map_location='cpu' ) lowercase_ : Tuple = checkpoint['Downstream'] lowercase_ : Tuple = WavaVecaConfig.from_pretrained(snake_case_ ) lowercase_ : int = WavaVecaFeatureExtractor.from_pretrained( snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ ) lowercase_ : Tuple = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): lowercase_ : Any = convert_classification(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForAudioFrameClassification' ): lowercase_ : Optional[Any] = convert_diarization(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForXVector' ): lowercase_ : int = convert_xvector(snake_case_ , snake_case_ , snake_case_ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: lowercase_ : Optional[int] = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(snake_case_ ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() 
parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
368
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
0
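Aside: both converters in the row above transpose any array whose name contains "kernel". A hedged sketch of why, assuming only that numpy and torch are installed; the layer sizes are made up.

import numpy as np
import torch

# TF Dense kernels are stored as (in_features, out_features); torch.nn.Linear
# keeps weight as (out_features, in_features), hence the transpose on load.
tf_kernel = np.random.rand(4, 8).astype(np.float32)  # (in, out), toy sizes
linear = torch.nn.Linear(4, 8)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(tf_kernel.transpose()))
assert tuple(linear.weight.shape) == (8, 4)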
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ): if density <= 0: raise ValueError('Impossible fluid density' ) if bulk_modulus <= 0: raise ValueError('Impossible bulk modulus' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
369
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
0
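Aside: a quick worked example for the Newton-Laplace helper in the row above, using approximate textbook values for water; the numbers are assumptions, not from the source.

# c = sqrt(K / rho): bulk modulus ~2.2 GPa, density ~1000 kg/m^3 for water
bulk_modulus = 2.2e9   # Pa
density = 1000.0       # kg/m^3
speed = (bulk_modulus / density) ** 0.5
print(f"{speed:.0f} m/s")  # ~1483 m/s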
"""simple docstring""" import qiskit def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" lowercase_ : Tuple = qiskit.Aer.get_backend('aer_simulator' ) lowercase_ : Union[str, Any] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator lowercase_ : Any = qiskit.execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=10_00 ) # Return the histogram data of the results of the experiment return job.result().get_counts(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =half_adder(1, 1) print(F"Half Adder Output Qubit Counts: {counts}")
370
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
0
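Aside: a classical cross-check of the half adder in the row above. For each input pair the circuit should report sum = a XOR b (qubit 2) and carry = a AND b (qubit 3); the plain-Python truth table below needs no qiskit.

for a in (0, 1):
    for b in (0, 1):
        # XOR gives the sum bit, AND gives the carry bit
        print(f"a={a} b={b} -> sum={a ^ b} carry={a & b}")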
"""simple docstring""" from math import pi, sqrt, tan def lowercase__( __SCREAMING_SNAKE_CASE : float ): if side_length < 0: raise ValueError('surface_area_cube() only accepts non-negative values' ) return 6 * side_length**2 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if length < 0 or breadth < 0 or height < 0: raise ValueError('surface_area_cuboid() only accepts non-negative values' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowercase__( __SCREAMING_SNAKE_CASE : float ): if radius < 0: raise ValueError('surface_area_sphere() only accepts non-negative values' ) return 4 * pi * radius**2 def lowercase__( __SCREAMING_SNAKE_CASE : float ): if radius < 0: raise ValueError('surface_area_hemisphere() only accepts non-negative values' ) return 3 * pi * radius**2 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if radius < 0 or height < 0: raise ValueError('surface_area_cone() only accepts non-negative values' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( 'surface_area_conical_frustum() only accepts non-negative values' ) lowercase_ : int = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if radius < 0 or height < 0: raise ValueError('surface_area_cylinder() only accepts non-negative values' ) return 2 * pi * radius * (height + radius) def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if torus_radius < 0 or tube_radius < 0: raise ValueError('surface_area_torus() only accepts non-negative values' ) if torus_radius < tube_radius: raise ValueError( 'surface_area_torus() does not support spindle or self intersecting tori' ) return 4 * pow(__lowerCAmelCase , 2 ) * torus_radius * tube_radius def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if length < 0 or width < 0: raise ValueError('area_rectangle() only accepts non-negative values' ) return length * width def lowercase__( __SCREAMING_SNAKE_CASE : float ): if side_length < 0: raise ValueError('area_square() only accepts non-negative values' ) return side_length**2 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if base < 0 or height < 0: raise ValueError('area_triangle() only accepts non-negative values' ) return (base * height) / 2 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('area_triangle_three_sides() only accepts non-negative values' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('Given three sides do not form a triangle' ) lowercase_ : Optional[int] = (sidea + sidea + sidea) / 2 lowercase_ : Any = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if base < 0 or height < 0: raise ValueError('area_parallelogram() only accepts non-negative values' ) return base * height def lowercase__( __SCREAMING_SNAKE_CASE : float , 
__SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if basea < 0 or basea < 0 or height < 0: raise ValueError('area_trapezium() only accepts non-negative values' ) return 1 / 2 * (basea + basea) * height def lowercase__( __SCREAMING_SNAKE_CASE : float ): if radius < 0: raise ValueError('area_circle() only accepts non-negative values' ) return pi * radius**2 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if radius_x < 0 or radius_y < 0: raise ValueError('area_ellipse() only accepts non-negative values' ) return pi * radius_x * radius_y def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError('area_rhombus() only accepts non-negative values' ) return 1 / 2 * diagonal_a * diagonal_a def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or sides < 3: raise ValueError( 'area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides' ) elif length < 0: raise ValueError( 'area_reg_polygon() only accepts non-negative values as \ length of a side' ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(F"Rectangle: {area_rectangle(10, 20) = }") print(F"Square: {area_square(10) = }") print(F"Triangle: {area_triangle(10, 10) = }") print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }") print(F"Parallelogram: {area_parallelogram(10, 20) = }") print(F"Rhombus: {area_rhombus(10, 20) = }") print(F"Trapezium: {area_trapezium(10, 20, 30) = }") print(F"Circle: {area_circle(20) = }") print(F"Ellipse: {area_ellipse(10, 20) = }") print("\nSurface Areas of various geometric shapes: \n") print(F"Cube: {surface_area_cube(20) = }") print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }") print(F"Sphere: {surface_area_sphere(20) = }") print(F"Hemisphere: {surface_area_hemisphere(20) = }") print(F"Cone: {surface_area_cone(10, 20) = }") print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(F"Cylinder: {surface_area_cylinder(10, 20) = }") print(F"Torus: {surface_area_torus(20, 10) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }") print(F"Square: {area_reg_polygon(4, 10) = }") print(F"Regular Pentagon: {area_reg_polygon(5, 10) = }")
371
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
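Aside: two quick numeric sanity checks for the geometry module in the row above: Heron's formula on the 5-12-13 right triangle (area 30), and the regular-polygon formula reducing to a plain square for four sides.

from math import sqrt, tan, pi

a, b, c = 5, 12, 13
s = (a + b + c) / 2                                    # semi-perimeter
assert sqrt(s * (s - a) * (s - b) * (s - c)) == 30.0   # Heron's formula
# n * L^2 / (4 * tan(pi / n)) collapses to L^2 when n = 4
assert abs(4 * 10**2 / (4 * tan(pi / 4)) - 100.0) < 1e-9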
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=10 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=None ,) -> Optional[int]: '''simple docstring''' lowercase_ : Any = size if size is not None else {'shortest_edge': 18} lowercase_ : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowercase_ : List[str] = parent lowercase_ : List[str] = batch_size lowercase_ : Optional[int] = num_channels lowercase_ : Union[str, Any] = num_frames lowercase_ : Union[str, Any] = image_size lowercase_ : List[str] = min_resolution lowercase_ : int = max_resolution lowercase_ : Union[str, Any] = do_resize lowercase_ : Optional[int] = size lowercase_ : str = do_normalize lowercase_ : Tuple = image_mean lowercase_ : Any = image_std lowercase_ : Tuple = crop_size def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = VivitImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = VivitImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_center_crop' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowercase_ : Any = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) 
self.assertIsInstance(video[0] ,Image.Image ) # Test not batched input lowercase_ : Any = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) self.assertIsInstance(video[0] ,np.ndarray ) # Test not batched input lowercase_ : Tuple = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[str] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) self.assertIsInstance(video[0] ,torch.Tensor ) # Test not batched input lowercase_ : Optional[int] = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Tuple = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
350
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
321
0
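Aside: the from-scratch CNN in the row above pools with explicit Python loops. A hedged vectorised equivalent for average pooling, assuming the map size divides evenly by the window; sizes are illustrative.

import numpy as np

feature_map = np.arange(16, dtype=float).reshape(4, 4)
pool = 2
# Split into (rows, pool, cols, pool) blocks, then average each 2x2 block.
pooled = feature_map.reshape(4 // pool, pool, 4 // pool, pool).mean(axis=(1, 3))
print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]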
"""simple docstring""" import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
351
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
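Every test above follows the same three-step pattern: build a TensorFlowBenchmarkArguments object, wrap it in a TensorFlowBenchmark, and call run(). A minimal standalone sketch of that pattern (same tiny checkpoint and flags as the tests; treat it as illustrative, since the benchmark utilities are deprecated in recent transformers releases):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# time and memory for inference only, one tiny model, single process
args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)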
321
0
"""simple docstring""" import requests from bsa import BeautifulSoup def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ): """simple docstring""" lowercase_ : int = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE ).text , 'html.parser' ) lowercase_ : List[str] = soup.findAll('h1' ) lowercase_ : List[str] = soup.findAll('div' , {'class': 'maincounter-number'} ) keys += soup.findAll('span' , {'class': 'panel-title'} ) values += soup.findAll('div' , {'class': 'number-table-main'} ) return {key.text.strip(): value.text.strip() for key, value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} if __name__ == "__main__": print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") for key, value in world_covidaa_stats().items(): print(F"{key}\n{value}\n")
352
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
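The extractor above validates channel counts, chunks audio with chunk_length/chunk_stride, and pads batches to a common length. Its class and argument names have been placeholder-mangled here, but the logic matches transformers' EncodecFeatureExtractor; assuming that upstream class, a typical call looks like this (shapes illustrative):

import numpy as np

from transformers import EncodecFeatureExtractor  # assumed upstream equivalent of the class above

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
raw_audio = np.zeros(24000, dtype=np.float32)  # one second of mono silence
inputs = feature_extractor(raw_audio, sampling_rate=24000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, samples)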
321
0
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Optional[int] = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=__SCREAMING_SNAKE_CASE ) lowercase_ : int = [i[0] for i in r], [i[1] for i in r] lowercase_ : str = list(accumulate(__SCREAMING_SNAKE_CASE ) ) lowercase_ : Union[str, Any] = bisect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
353
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
0
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # divisor is now gcd(numerator, denominator); reduce the fraction
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
354
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class UpperCamelCase ( lowercase_ ): lowercase = 'van' def __init__( self ,__UpperCamelCase=224 ,__UpperCamelCase=3 ,__UpperCamelCase=[7, 3, 3, 3] ,__UpperCamelCase=[4, 2, 2, 2] ,__UpperCamelCase=[64, 128, 320, 512] ,__UpperCamelCase=[3, 3, 12, 3] ,__UpperCamelCase=[8, 8, 4, 4] ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-6 ,__UpperCamelCase=1e-2 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : List[str] = image_size lowercase_ : Any = num_channels lowercase_ : Dict = patch_sizes lowercase_ : Optional[int] = strides lowercase_ : Any = hidden_sizes lowercase_ : List[str] = depths lowercase_ : Any = mlp_ratios lowercase_ : List[str] = hidden_act lowercase_ : Tuple = initializer_range lowercase_ : Tuple = layer_norm_eps lowercase_ : int = layer_scale_init_value lowercase_ : List[Any] = drop_path_rate lowercase_ : Any = dropout_rate
355
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
0
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __SCREAMING_SNAKE_CASE = "." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __SCREAMING_SNAKE_CASE = [ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : int = SavedModel() lowercase_ : Union[str, Any] = [] with open(os.path.join(__SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f: lowercase_ : List[str] = json.load(__SCREAMING_SNAKE_CASE )['opsets'] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(__SCREAMING_SNAKE_CASE )] ) with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: saved_model.ParseFromString(f.read() ) lowercase_ : Any = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want lowercase_ : int = sorted(__SCREAMING_SNAKE_CASE ) lowercase_ : str = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__SCREAMING_SNAKE_CASE ) if strict and len(__SCREAMING_SNAKE_CASE ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(__SCREAMING_SNAKE_CASE ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__SCREAMING_SNAKE_CASE , sep='\n' ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) __SCREAMING_SNAKE_CASE = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = min(__SCREAMING_SNAKE_CASE ) # min() finds the minimum value lowercase_ : List[str] = max(__SCREAMING_SNAKE_CASE ) # max() finds the maximum value lowercase_ : Union[str, Any] = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowercase_ : Optional[int] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowercase_ : Optional[Any] = 0 for count in range(__SCREAMING_SNAKE_CASE ): while holes[count] > 0: holes[count] -= 1 lowercase_ : Union[str, Any] = count + min_val i += 1 def lowercase__( ): lowercase_ : Optional[Any] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(__SCREAMING_SNAKE_CASE ) print('Sorted order is:' , ' '.join(__SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main()
357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
"""simple docstring""" import random def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = [], [], [] for element in data: if element < pivot: less.append(__SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(__SCREAMING_SNAKE_CASE ) else: equal.append(__SCREAMING_SNAKE_CASE ) return less, equal, greater def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int ): # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(__SCREAMING_SNAKE_CASE ) or index < 0: return None lowercase_ : Any = items[random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 )] lowercase_ : List[Any] = 0 lowercase_ : int = _partition(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = len(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = len(__SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(__SCREAMING_SNAKE_CASE , index - (m + count) )
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __SCREAMING_SNAKE_CASE =TypeVar("KEY") __SCREAMING_SNAKE_CASE =TypeVar("VAL") @dataclass(frozen=lowercase_ , slots=lowercase_ ) class UpperCamelCase ( Generic[KEY, VAL] ): lowercase = 4_2 lowercase = 4_2 class UpperCamelCase ( _Item ): def __init__( self ) -> None: '''simple docstring''' super().__init__(__UpperCamelCase ,__UpperCamelCase ) def __bool__( self ) -> bool: '''simple docstring''' return False __SCREAMING_SNAKE_CASE =_DeletedItem() class UpperCamelCase ( MutableMapping[KEY, VAL] ): def __init__( self ,__UpperCamelCase = 8 ,__UpperCamelCase = 0.75 ) -> None: '''simple docstring''' lowercase_ : Optional[Any] = initial_block_size lowercase_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowercase_ : Tuple = capacity_factor lowercase_ : Optional[int] = 0 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' return hash(__UpperCamelCase ) % len(self._buckets ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' return (ind + 1) % len(self._buckets ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Tuple = self._buckets[ind] if not stored: lowercase_ : List[str] = _Item(__UpperCamelCase ,__UpperCamelCase ) self._len += 1 return True elif stored.key == key: lowercase_ : Tuple = _Item(__UpperCamelCase ,__UpperCamelCase ) return True else: return False def _UpperCAmelCase ( self ) -> bool: '''simple docstring''' lowercase_ : Any = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> bool: '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowercase_ : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = self._buckets lowercase_ : Tuple = [None] * new_size lowercase_ : Union[str, Any] = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def _UpperCAmelCase ( self ) -> None: '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def _UpperCAmelCase ( self ) -> None: '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Iterator[int]: '''simple docstring''' lowercase_ : str = self._get_bucket_index(__UpperCamelCase ) for _ in range(len(self._buckets ) ): yield ind lowercase_ : List[str] = self._get_next_ind(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> None: '''simple docstring''' for ind in self._iterate_buckets(__UpperCamelCase ): if self._try_set(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): break def __setitem__( self ,__UpperCamelCase ,__UpperCamelCase ) -> None: '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCamelCase ,__UpperCamelCase ) def __delitem__( self ,__UpperCamelCase ) -> None: '''simple docstring''' for ind in self._iterate_buckets(__UpperCamelCase ): lowercase_ : Dict = self._buckets[ind] if item is None: raise KeyError(__UpperCamelCase ) if item is _deleted: continue if item.key == key: lowercase_ : str = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self ,__UpperCamelCase ) -> VAL: '''simple docstring''' for ind in 
self._iterate_buckets(__UpperCamelCase ): lowercase_ : List[Any] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCamelCase ) def __len__( self ) -> int: '''simple docstring''' return self._len def __iter__( self ) -> Iterator[KEY]: '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: '''simple docstring''' lowercase_ : str = ' ,'.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
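Since the class implements the full MutableMapping protocol, it can be driven exactly like a dict; deletion stores a tombstone (_deleted) so open-addressing probe chains stay intact:

hm = HashMap(initial_block_size=4)
for i in range(10):
    hm[f"key{i}"] = i  # triggers several _size_up() resizes along the way
print(len(hm), hm["key7"])  # 10 7
del hm["key7"]
print("key7" in hm, len(hm))  # False 9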
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
0
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __SCREAMING_SNAKE_CASE =Lock() def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowercase_ : Union[str, Any] = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowercase_ : Dict = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowercase_ : Optional[Any] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowercase_ : Optional[Any] = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Optional[int] = [] lowercase_ : List[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowercase_ : Union[str, Any] = Pipe() lowercase_ : int = Pipe() process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowercase_ : Any = temp_rs lowercase_ : Union[str, Any] = temp_rr for i in range(1 , len(__SCREAMING_SNAKE_CASE ) - 1 ): lowercase_ : Dict = Pipe() lowercase_ : Tuple = Pipe() process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowercase_ : str = temp_rs lowercase_ : int = temp_rr process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=( len(__SCREAMING_SNAKE_CASE ) - 1, arr[len(__SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : Any = result_pipe[p][0].recv() process_array_[p].join() return arr def lowercase__( ): lowercase_ : Optional[int] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = odd_even_transposition(__SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE =[ "small", "small-base", "medium", "medium-base", "intermediate", "intermediate-base", "large", "large-base", "xlarge", "xlarge-base", ] __SCREAMING_SNAKE_CASE ={ "vocab_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json", "funnel-transformer/small-base": ( "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json" ), "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json", "funnel-transformer/medium-base": ( "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json", "funnel-transformer/large-base": ( "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json" ), "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json", "funnel-transformer/xlarge-base": ( "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json" ), }, } __SCREAMING_SNAKE_CASE ={F"funnel-transformer/{name}": 512 for name in _model_names} __SCREAMING_SNAKE_CASE ={F"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = FunnelTokenizer lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = 2 def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<sep>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<cls>" 
,__UpperCamelCase="<mask>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase="##" ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,clean_text=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,wordpieces_prefix=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : Any = do_lower_case lowercase_ : List[Any] = strip_accents lowercase_ : Optional[int] = tokenize_chinese_chars lowercase_ : List[str] = normalizer_class(**__UpperCamelCase ) lowercase_ : Dict = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Dict = [self.sep_token_id] lowercase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
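A minimal sketch of the special-token layout the fast tokenizer above produces: `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] B [SEP]` for a pair, with segment ids 0 for the first segment and 1 for the second (the class above may substitute a dedicated cls_token_type_id for the leading token). The ids 101/102 for [CLS]/[SEP] are illustrative assumptions, not values read from a real vocabulary.

def build_with_special_tokens(ids_a, ids_b=None, cls_id=101, sep_id=102):
    # [CLS] + first sequence + [SEP], all in segment 0
    ids = [cls_id] + ids_a + [sep_id]
    type_ids = [0] * len(ids)
    if ids_b is not None:
        # second sequence + closing [SEP], all in segment 1
        ids += ids_b + [sep_id]
        type_ids += [1] * (len(ids_b) + 1)
    return ids, type_ids

assert build_with_special_tokens([7, 8]) == ([101, 7, 8, 102], [0, 0, 0, 0])
assert build_with_special_tokens([7], [9]) == ([101, 7, 102, 9, 102], [0, 0, 0, 1, 1])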
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE ={ "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["RemBertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["RemBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RemBertForCausalLM", "RemBertForMaskedLM", "RemBertForMultipleChoice", "RemBertForQuestionAnswering", "RemBertForSequenceClassification", "RemBertForTokenClassification", "RemBertLayer", "RemBertModel", "RemBertPreTrainedModel", "load_tf_weights_in_rembert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", "TFRemBertLayer", "TFRemBertModel", "TFRemBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        lowercase_ : Any = new_value
        lowercase_ : List[str] = new_value
        self.sift_up(self.idx_of_element[node] )


__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
321
0
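A quick standalone check for the sift_up/sift_down logic in the MinHeap above is the heap invariant itself: every parent is no larger than either of its children. This sketch assumes a plain list of comparable values for brevity (the class above stores Node objects keyed by val).

def is_min_heap(values):
    # parent of index i lives at (i - 1) // 2, mirroring MinHeap.get_parent_idx
    return all(values[(i - 1) // 2] <= values[i] for i in range(1, len(values)))

assert is_min_heap([-17, 1, 3, 6, 4])  # a valid order after decrease_key(b, -17)
assert not is_min_heap([6, 1, 3])      # root larger than a child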
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __SCREAMING_SNAKE_CASE ="base_with_context" def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) lowercase_ : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Dict = weights[F'''layers_{lyr_num}'''] lowercase_ : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Any = ly_weight['attention'] lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ): lowercase_ : Dict = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Any = weights[F'''layers_{lyr_num}'''] lowercase_ : List[str] = ly_weight['attention'] lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) lowercase_ : Optional[Any] = 
nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase_ : Optional[int] = weights[F'''layers_{lyr_num}'''] lowercase_ : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : int = ly_weight['self_attention'] lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : List[str] = ly_weight['MultiHeadDotProductAttention_0'] lowercase_ : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : int = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : int = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase_ : Union[str, Any] = jnp.tree_util.tree_map(onp.array , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] lowercase_ : str = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) lowercase_ : Union[str, Any] = inference.parse_training_gin_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = inference.InferenceModel(args.checkpoint_path , __SCREAMING_SNAKE_CASE ) lowercase_ : str = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) lowercase_ : str = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : Optional[Any] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : List[str] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase_ : Dict = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = load_decoder(ta_checkpoint['target']['decoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) lowercase_ : Dict = SpectrogramDiffusionPipeline( notes_encoder=__SCREAMING_SNAKE_CASE , continuous_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , melgan=__SCREAMING_SNAKE_CASE , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F"{MODEL}/checkpoint_500000", type=str, required=False, help="Path to the original jax model checkpoint.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() main(args)
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
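Every weight port in the conversion script above takes `['kernel'].T` before wrapping the array in `nn.Parameter`. That transpose covers a storage-convention mismatch: Flax dense kernels are laid out as (in_features, out_features) while `torch.nn.Linear.weight` is (out_features, in_features). A small sketch of the equivalence, with arbitrary shapes:

import numpy as np
import torch

kernel = np.random.rand(4, 3).astype(np.float32)  # Flax layout: (in, out)
linear = torch.nn.Linear(4, 3, bias=False)
linear.weight = torch.nn.Parameter(torch.from_numpy(kernel.T))  # torch layout: (out, in)

x = np.random.rand(2, 4).astype(np.float32)
# x @ kernel (Flax convention) matches the transposed-weight torch Linear
assert np.allclose(x @ kernel, linear(torch.from_numpy(x)).detach().numpy(), atol=1e-5)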
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : str = args.pruning_method lowercase_ : Dict = args.threshold lowercase_ : Tuple = args.model_name_or_path.rstrip('/' ) lowercase_ : Union[str, Any] = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) lowercase_ : Any = torch.load(os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) lowercase_ : str = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowercase_ : int = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: lowercase_ : List[str] = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: lowercase_ : int = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": lowercase_ : List[str] = MagnitudeBinarizer.apply(inputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue lowercase_ : Tuple = name[:-6] lowercase_ : Any = model[F'''{prefix_}mask_scores'''] lowercase_ : int = TopKBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowercase_ : List[Any] = name[:-6] lowercase_ : Union[str, Any] = model[F'''{prefix_}mask_scores'''] lowercase_ : List[str] = ThresholdBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue lowercase_ : List[str] = name[:-6] lowercase_ : Dict = model[F'''{prefix_}mask_scores'''] lowercase_ : str = -0.1, 1.1 lowercase_ : Any = torch.sigmoid(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = s * (r - l) + l lowercase_ : Union[str, Any] = s_bar.clamp(min=0.0 , max=1.0 ) lowercase_ : List[Any] = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: lowercase_ : Dict = os.path.join( os.path.dirname(__SCREAMING_SNAKE_CASE ) , F'''bertarized_{os.path.basename(__SCREAMING_SNAKE_CASE )}''' ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): shutil.copytree(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." 
"Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __SCREAMING_SNAKE_CASE =parser.parse_args() main(args)
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
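The `topK` branch of the pruning script above multiplies each weight tensor by a binary mask derived from its learned `mask_scores`. Below is a standalone re-implementation of that masking idea, not the `emmental` TopKBinarizer itself; `threshold` is taken here as a fraction in [0, 1], which is an assumption for the sketch.

import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    # keep the `threshold` fraction of entries with the largest scores
    k = max(1, int(threshold * scores.numel()))
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).to(scores.dtype)

scores = torch.randn(4, 4)
weights = torch.randn(4, 4)
pruned = weights * topk_mask(scores, threshold=0.25)  # 4 of 16 entries survive (barring ties)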
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = filter(lambda __SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() ) lowercase_ : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] ) return params __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ): if metric == "rouge2": lowercase_ : Union[str, Any] = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": lowercase_ : List[str] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": lowercase_ : Dict = '{val_avg_em:.4f}-{step_count}' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ' function.' ) lowercase_ : Dict = ModelCheckpoint( dirpath=__SCREAMING_SNAKE_CASE , filename=__SCREAMING_SNAKE_CASE , monitor=F'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ): return EarlyStopping( monitor=F'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , ) class __lowerCamelCase ( pl.Callback ): def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Dict = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__UpperCamelCase ) @rank_zero_only def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=True ) -> None: '''simple docstring''' logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase_ : str = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results lowercase_ : Optional[int] = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase_ : List[Any] = od / 'test_results.txt' lowercase_ : int = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase_ : int = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase_ : Any = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=__UpperCamelCase ) generations_file.parent.mkdir(exist_ok=__UpperCamelCase ) with open(__UpperCamelCase ,'a+' ) as writer: for key in sorted(__UpperCamelCase ): if key in ["log", "progress_bar", "preds"]: continue lowercase_ : Tuple = metrics[key] if isinstance(__UpperCamelCase ,torch.Tensor ): lowercase_ : Union[str, Any] = val.item() lowercase_ : Tuple = f'''{key}: {val:.6f}\n''' writer.write(__UpperCamelCase ) if not save_generations: return if "preds" in metrics: lowercase_ : Any = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(__UpperCamelCase ) @rank_zero_only def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: lowercase_ : Any = pl_module.model.model.num_parameters() except AttributeError: lowercase_ : str = pl_module.model.num_parameters() lowercase_ : List[Any] = count_trainable_parameters(__UpperCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} ) @rank_zero_only def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' save_json(pl_module.metrics ,pl_module.metrics_save_path ) return self._write_logs(__UpperCamelCase ,__UpperCamelCase ,'test' ) @rank_zero_only def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' save_json(pl_module.metrics ,pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
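The pytorch-lightning callback earlier in this record logs a trainable-parameter count in millions (`mp`). A minimal standalone version of that count, for any `torch.nn.Module`:

import torch.nn as nn

def count_trainable(model: nn.Module) -> int:
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

model = nn.Linear(10, 5)                   # 50 weights + 5 biases
print(count_trainable(model) / 1e6, "M")   # 5.5e-05 M, i.e. 55 parameters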
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __SCREAMING_SNAKE_CASE ={ "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __SCREAMING_SNAKE_CASE ={ "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = SqueezeBertTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> int: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : List[str] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : List[str] = do_lower_case lowercase_ : str = strip_accents lowercase_ : Optional[Any] = tokenize_chinese_chars lowercase_ : Optional[Any] = normalizer_class(**__UpperCamelCase ) lowercase_ : Any = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int: '''simple docstring''' lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : List[str] = [self.sep_token_id] lowercase_ : List[Any] = [self.cls_token_id] 
if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Optional[int] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
321
0
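Both fast tokenizers in this document rebuild their backend normalizer only when the serialized state disagrees with the constructor kwargs. Here is a sketch of that comparison with plain dicts standing in for the `tokenizers` objects; the keys mirror the serialized names the code above actually reads ('lowercase', 'strip_accents', 'handle_chinese_chars', 'type').

state = {
    "type": "BertNormalizer",
    "lowercase": True,
    "strip_accents": None,
    "handle_chinese_chars": True,
}
requested = {"lowercase": False, "strip_accents": None, "handle_chinese_chars": True}

if any(state.get(key) != value for key, value in requested.items()):
    normalizer_type = state.pop("type")  # resolved via getattr(normalizers, ...) above
    state.update(requested)
    print(f"rebuilding {normalizer_type} with {state}")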
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = XLMTokenizer lowercase = False def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] lowercase_ : Dict = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(__UpperCamelCase ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = 'lower newer' lowercase_ : Any = 'lower newer' return input_text, output_text def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = XLMTokenizer(self.vocab_file ,self.merges_file ) lowercase_ : str = 'lower' lowercase_ : Union[str, Any] = ['low', 'er</w>'] lowercase_ : List[str] = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = tokens + ['<unk>'] lowercase_ : str = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' ) lowercase_ : Union[str, Any] = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Tuple = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
"""simple docstring""" from PIL import Image def lowercase__( __SCREAMING_SNAKE_CASE : Image , __SCREAMING_SNAKE_CASE : float ): def brightness(__SCREAMING_SNAKE_CASE : int ) -> float: return 1_28 + level + (c - 1_28) if not -255.0 <= level <= 255.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 __SCREAMING_SNAKE_CASE =change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class UpperCamelCase ( lowercase_ ): lowercase = 'poolformer' def __init__( self ,__UpperCamelCase=3 ,__UpperCamelCase=16 ,__UpperCamelCase=16 ,__UpperCamelCase=3 ,__UpperCamelCase=4.0 ,__UpperCamelCase=[2, 2, 6, 2] ,__UpperCamelCase=[64, 128, 320, 512] ,__UpperCamelCase=[7, 3, 3, 3] ,__UpperCamelCase=[4, 2, 2, 2] ,__UpperCamelCase=[2, 1, 1, 1] ,__UpperCamelCase=4 ,__UpperCamelCase=0.0 ,__UpperCamelCase="gelu" ,__UpperCamelCase=True ,__UpperCamelCase=1e-5 ,__UpperCamelCase=0.02 ,**__UpperCamelCase ,) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = num_channels lowercase_ : Dict = patch_size lowercase_ : Union[str, Any] = stride lowercase_ : List[Any] = padding lowercase_ : List[str] = pool_size lowercase_ : Any = hidden_sizes lowercase_ : str = mlp_ratio lowercase_ : str = depths lowercase_ : List[str] = patch_sizes lowercase_ : Tuple = strides lowercase_ : str = num_encoder_blocks lowercase_ : Optional[Any] = drop_path_rate lowercase_ : List[Any] = hidden_act lowercase_ : Any = use_layer_scale lowercase_ : Tuple = layer_scale_init_value lowercase_ : int = initializer_range super().__init__(**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = version.parse('1.11' ) @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _UpperCAmelCase ( self ) -> float: '''simple docstring''' return 2e-3
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __SCREAMING_SNAKE_CASE =[ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" for attribute in key.split('.' ): lowercase_ : Tuple = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if weight_type is not None: lowercase_ : str = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape else: lowercase_ : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ : Tuple = value elif weight_type == "weight_g": lowercase_ : Union[str, Any] = value elif weight_type == "weight_v": lowercase_ : List[Any] = value elif weight_type == "bias": lowercase_ : Tuple = value else: lowercase_ : Dict = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" lowercase_ : Any = [] lowercase_ : Any = fairseq_model.state_dict() lowercase_ : List[str] = hf_model.feature_extractor lowercase_ : Optional[int] = hf_model.adapter for name, value in fairseq_dict.items(): lowercase_ : int = False if "conv_layers" in name: load_conv_layer( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) lowercase_ : Optional[Any] = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: lowercase_ : Optional[int] = True if "*" in mapped_key: lowercase_ : List[str] = name.split(__SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] lowercase_ : Union[str, Any] = mapped_key.replace('*' , __SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowercase_ : Union[str, Any] = 'weight_g' elif "weight_v" in name: lowercase_ : Dict = 'weight_v' elif "bias" in name: lowercase_ : Optional[int] = 'bias' elif "weight" in name: lowercase_ : List[Any] = 'weight' else: lowercase_ : List[Any] = None set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(__SCREAMING_SNAKE_CASE ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" lowercase_ : Any = full_name.split('conv_layers.' )[-1] lowercase_ : Optional[Any] = name.split('.' ) lowercase_ : str = int(items[0] ) lowercase_ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase_ : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase_ : Optional[Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowercase_ : Optional[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase_ : Any = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" lowercase_ : Tuple = full_name.split('adaptor.' )[-1] lowercase_ : Any = name.split('.' 
) if items[1].isdigit(): lowercase_ : int = int(items[1] ) else: lowercase_ : Any = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' lowercase_ : Optional[Any] = value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' lowercase_ : Optional[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' lowercase_ : Union[str, Any] = value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' lowercase_ : Dict = value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' lowercase_ : int = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' lowercase_ : str = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int ): """simple docstring""" lowercase_ : Union[str, Any] = emb.weight.shape lowercase_ : Dict = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = emb.weight.data return lin_layer @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , ): """simple docstring""" lowercase_ : List[Any] = WavaVecaConfig.from_pretrained( __SCREAMING_SNAKE_CASE , add_adapter=__SCREAMING_SNAKE_CASE , adapter_stride=__SCREAMING_SNAKE_CASE , adapter_kernel_size=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , output_hidden_size=__SCREAMING_SNAKE_CASE , ) lowercase_ : Dict = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) # load model lowercase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) lowercase_ : int = model[0].eval() # load feature extractor lowercase_ : Any = 
WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE ) # set weights for wav2vec2 encoder lowercase_ : Tuple = WavaVecaModel(__SCREAMING_SNAKE_CASE ) recursively_load_weights_wavaveca(model.encoder , __SCREAMING_SNAKE_CASE ) # load decoder weights lowercase_ : int = MBartForCausalLM(__SCREAMING_SNAKE_CASE ) lowercase_ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__SCREAMING_SNAKE_CASE ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowercase_ : List[str] = SpeechEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = False lowercase_ : List[str] = MBartaaTokenizer(__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = hf_wavavec.config.to_dict() lowercase_ : Dict = tokenizer.pad_token_id lowercase_ : List[str] = tokenizer.bos_token_id lowercase_ : Any = tokenizer.eos_token_id lowercase_ : Optional[int] = 'mbart50' lowercase_ : List[str] = 'wav2vec2' lowercase_ : str = tokenizer.eos_token_id lowercase_ : Any = 25_00_04 lowercase_ : Union[str, Any] = tokenizer.eos_token_id lowercase_ : Dict = SpeechEncoderDecoderConfig.from_dict(__SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring""" from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class UpperCamelCase ( lowercase_ ): lowercase = 'Salesforce/blip-image-captioning-base' lowercase = ( 'This is a tool that generates a description of an image. It takes an input named `image` which should be the ' 'image to caption, and returns a text that contains the description in English.' ) lowercase = 'image_captioner' lowercase = AutoModelForVisionaSeq lowercase = ['image'] lowercase = ['text'] def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self ,['vision'] ) super().__init__(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return self.pre_processor(images=__UpperCamelCase ,return_tensors='pt' ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.model.generate(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return self.pre_processor.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )[0].strip()
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __SCREAMING_SNAKE_CASE ={ "configuration_efficientnet": [ "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientNetConfig", "EfficientNetOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["EfficientNetImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
"""simple docstring""" from math import ceil def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : List[Any] = list(range(0 , __SCREAMING_SNAKE_CASE ) ) lowercase_ : List[Any] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check lowercase_ : Optional[int] = [] for i in device_map_blocks: if device_map_blocks.count(__SCREAMING_SNAKE_CASE ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__SCREAMING_SNAKE_CASE ) # Missing blocks lowercase_ : List[Any] = [i for i in blocks if i not in device_map_blocks] lowercase_ : Optional[int] = [i for i in device_map_blocks if i not in blocks] if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(__SCREAMING_SNAKE_CASE ) ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Optional[Any] = list(range(__SCREAMING_SNAKE_CASE ) ) lowercase_ : List[str] = int(ceil(n_layers / len(__SCREAMING_SNAKE_CASE ) ) ) lowercase_ : Optional[Any] = [layers[i : i + n_blocks] for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
0
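A minimal, self-contained sketch of the round-robin layer split that the two device-map helpers in the sample above implement; the name make_device_map is an illustrative stand-in for the obfuscated function names:

from math import ceil

def make_device_map(devices, n_layers):
    # Give each device a contiguous block of ceil(n_layers / n_devices) layers.
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    blocks = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, blocks))

# 12 transformer layers over 2 devices -> 6 consecutive layers per device.
assert make_device_map([0, 1], 12) == {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}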
"""simple docstring""" from sklearn.metrics import fa_score import datasets __SCREAMING_SNAKE_CASE ="\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" __SCREAMING_SNAKE_CASE ="\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" __SCREAMING_SNAKE_CASE ="\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=1 ,__UpperCamelCase="binary" ,__UpperCamelCase=None ) -> Any: '''simple docstring''' lowercase_ : List[str] = fa_score( __UpperCamelCase ,__UpperCamelCase ,labels=__UpperCamelCase ,pos_label=__UpperCamelCase ,average=__UpperCamelCase ,sample_weight=__UpperCamelCase ) return {"f1": float(__UpperCamelCase ) if score.size == 1 else score}
352
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
321
0
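The F1 sample above wraps sklearn's score in the legacy datasets metric API. A hedged usage sketch that mirrors the sample's own docstring examples (newer code would use evaluate.load("f1") instead of the deprecated loader):

import datasets

f1_metric = datasets.load_metric("f1")
result = f1_metric.compute(references=[0, 1, 0, 1], predictions=[0, 1, 1, 1])
# precision = 2/3 and recall = 1, so F1 = 2 * (2/3) / (2/3 + 1) = 0.8
print(result)  # {'f1': 0.8}, up to floating-point rounding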
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path __SCREAMING_SNAKE_CASE ="src/transformers" # Matches is_xxx_available() __SCREAMING_SNAKE_CASE =re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __SCREAMING_SNAKE_CASE =re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __SCREAMING_SNAKE_CASE =re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __SCREAMING_SNAKE_CASE =re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __SCREAMING_SNAKE_CASE =re.compile("^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __SCREAMING_SNAKE_CASE =re.compile("^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __SCREAMING_SNAKE_CASE =re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __SCREAMING_SNAKE_CASE =re.compile(r"^\s*try:") # Catches a line with else: __SCREAMING_SNAKE_CASE =re.compile(r"^\s*else:") def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): if _re_test_backend.search(__SCREAMING_SNAKE_CASE ) is None: return None lowercase_ : Tuple = [b[0] for b in _re_backend.findall(__SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase_ : str = f.readlines() lowercase_ : Any = 0 while line_index < len(__SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure lowercase_ : Optional[Any] = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: lowercase_ : Union[str, Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ): lowercase_ : Optional[int] = _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ).groups()[0] lowercase_ : List[Any] = re.findall('\[([^\]]+)\]' , __SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue lowercase_ : Optional[Any] = _re_import_struct_key_value.search(__SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: lowercase_ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__SCREAMING_SNAKE_CASE ) > 0] objects.extend(__SCREAMING_SNAKE_CASE ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 lowercase_ : int = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowercase_ : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): lowercase_ : str = lines[line_index] if _re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ) is not None: lowercase_ : Dict = _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) lowercase_ : Tuple = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0] objects.extend(__SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(__SCREAMING_SNAKE_CASE ) is not None: lowercase_ : Dict = _re_between_brackets.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) lowercase_ : Optional[int] = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0] objects.extend(__SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(__SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(__SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 lowercase_ : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowercase_ : int = [] while ( line_index < len(__SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): lowercase_ : List[Any] = lines[line_index] lowercase_ : List[str] = _re_import.search(__SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowercase_ : Dict = {'none': objects} # Let's continue with backend-specific objects while line_index < len(__SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
lowercase_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase_ : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase_ : int = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): lowercase_ : str = lines[line_index] lowercase_ : Dict = _re_import.search(__SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowercase_ : List[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): def find_duplicates(__SCREAMING_SNAKE_CASE : Union[str, Any] ): return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowercase_ : int = [] for key in import_dict_objects.keys(): lowercase_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowercase_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowercase_ : Any = 'base imports' if key == 'none' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def lowercase__( ): lowercase_ : Union[str, Any] = [] for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ): if "__init__.py" in files: lowercase_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) lowercase_ : Optional[int] = parse_init(__SCREAMING_SNAKE_CASE ) if objects is not None: lowercase_ : Optional[Any] = analyze_results(*__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: lowercase_ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) ) def lowercase__( ): lowercase_ : Any = [] for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(__SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0: continue lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) ) lowercase_ : Optional[int] = short_path.replace(os.path.sep , '.' 
) submodules.append(__SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) ) lowercase_ : Tuple = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' ) ) == 1: submodules.append(__SCREAMING_SNAKE_CASE ) return submodules __SCREAMING_SNAKE_CASE =[ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", ] def lowercase__( ): # This is to make sure the transformers module imported is the one in the repo. lowercase_ : List[str] = importlib.util.spec_from_file_location( 'transformers' , os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowercase_ : List[str] = spec.loader.load_module() lowercase_ : str = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__SCREAMING_SNAKE_CASE ) > 0: lowercase_ : Any = '\n'.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F'''{list_of_modules}\n''' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
353
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
0
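The heart of the init-checker sample above is detecting which backend guards a block of imports. A simplified, runnable sketch of that step (the stray empty group in the sample's is\_([a-z_]*)_available() pattern is dropped here, so findall returns plain strings):

import re

_re_backend = re.compile(r"is_([a-z_]*)_available")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is_[a-z_]*_available\(\)")

def find_backend(line):
    # Return e.g. "torch" for an "if not is_torch_available():" guard, else None.
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("import os") is None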
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "nielsr/canine-s": 2048, } # Unicode defines 1,114,112 total “codepoints” __SCREAMING_SNAKE_CASE =111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py __SCREAMING_SNAKE_CASE =0 __SCREAMING_SNAKE_CASE =0XE0_00 __SCREAMING_SNAKE_CASE =0XE0_01 __SCREAMING_SNAKE_CASE =0XE0_02 __SCREAMING_SNAKE_CASE =0XE0_03 __SCREAMING_SNAKE_CASE =0XE0_04 # Maps special codepoints to human-readable names. __SCREAMING_SNAKE_CASE ={ # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. __SCREAMING_SNAKE_CASE ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase ( lowercase_ ): lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=chr(__UpperCamelCase ) ,__UpperCamelCase=False ,__UpperCamelCase=2048 ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else bos_token lowercase_ : Optional[Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else eos_token lowercase_ : List[str] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else sep_token lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else cls_token lowercase_ : int = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token super().__init__( bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,model_max_length=__UpperCamelCase ,**__UpperCamelCase ,) # Creates a mapping for looking up the IDs of special symbols. lowercase_ : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowercase_ : Tuple = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowercase_ : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowercase_ : Any = UNICODE_VOCAB_SIZE lowercase_ : Optional[Any] = len(self._special_codepoints ) @property def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return self._unicode_vocab_size def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return list(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' try: return ord(__UpperCamelCase ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__UpperCamelCase ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return "".join(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : str = [self.sep_token_id] lowercase_ : Tuple = [self.cls_token_id] lowercase_ : str = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) lowercase_ : int = [1] + ([0] * len(__UpperCamelCase )) + [1] if token_ids_a is not None: result += ([0] * len(__UpperCamelCase )) + [1] return result def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] lowercase_ : List[Any] = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> str: '''simple docstring''' return ()
354
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
0
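CANINE, whose tokenizer the sample above defines, is character-level: a token id is simply the character's Unicode codepoint, plus a few private-use codepoints for specials. The core mapping in miniature (illustrative; the real class also handles special tokens):

text = "héllo"
ids = [ord(ch) for ch in text]                 # characters -> codepoint ids
assert "".join(chr(i) for i in ids) == text    # ids -> characters round-trip
assert max(ids) < 1_114_112                    # every id fits the Unicode vocab size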
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase ( lowercase_ ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase ,'width_multiplier' ) ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=64 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase="swish" ,__UpperCamelCase=3 ,__UpperCamelCase=32 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=10 ,__UpperCamelCase=None ,__UpperCamelCase=0.25 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,) -> Tuple: '''simple docstring''' lowercase_ : str = parent lowercase_ : Tuple = batch_size lowercase_ : Dict = image_size lowercase_ : Optional[int] = patch_size lowercase_ : int = num_channels lowercase_ : Optional[Any] = make_divisible(512 * width_multiplier ,divisor=8 ) lowercase_ : List[str] = hidden_act lowercase_ : List[Any] = conv_kernel_size lowercase_ : Dict = output_stride lowercase_ : List[Any] = classifier_dropout_prob lowercase_ : List[Any] = use_labels lowercase_ : Union[str, Any] = is_training lowercase_ : List[Any] = num_labels lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = scope lowercase_ : Tuple = width_multiplier lowercase_ : Optional[Any] = ffn_dropout lowercase_ : List[str] = attn_dropout def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Optional[Any] = None lowercase_ : List[Any] = None if self.use_labels: lowercase_ : Any = ids_tensor([self.batch_size] ,self.num_labels ) lowercase_ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) lowercase_ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Any = MobileViTVaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : str = model(__UpperCamelCase ) 
self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.num_labels lowercase_ : Tuple = MobileViTVaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : str = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = self.num_labels lowercase_ : List[Any] = MobileViTVaForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) lowercase_ : Any = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : str = self.prepare_config_and_inputs() lowercase_ : Tuple = config_and_inputs lowercase_ : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = MobileViTVaModelTester(self ) lowercase_ : Union[str, Any] = MobileViTVaConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Any = model_class(__UpperCamelCase ) lowercase_ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : int = [*signature.parameters.keys()] lowercase_ : str = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Tuple = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : List[str] = outputs.hidden_states lowercase_ : str = 5 self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowercase_ : Dict = 2 for i in range(len(__UpperCamelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : int = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = MobileViTVaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( __UpperCamelCase ) lowercase_ : List[str] = self.default_image_processor 
lowercase_ : Tuple = prepare_img() lowercase_ : List[str] = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : int = model(**__UpperCamelCase ) # verify the logits lowercase_ : Any = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Dict = model.to(__UpperCamelCase ) lowercase_ : List[str] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Tuple = prepare_img() lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : str = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = outputs.logits # verify the logits lowercase_ : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] ,device=__UpperCamelCase ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Optional[int] = model.to(__UpperCamelCase ) lowercase_ : Dict = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : List[str] = prepare_img() lowercase_ : List[Any] = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : str = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = outputs.logits.detach().cpu() lowercase_ : Any = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ,target_sizes=[(50, 60)] ) lowercase_ : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape ,__UpperCamelCase ) lowercase_ : str = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ) lowercase_ : Optional[int] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape ,__UpperCamelCase )
355
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
0
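The MobileViTV2 tests above size hidden channels with make_divisible(512 * width_multiplier, divisor=8). A sketch of the usual MobileNet-style rounding rule; the exact body in the source module is an assumption here:

def make_divisible(value, divisor=8, min_value=None):
    # Round to the nearest multiple of divisor, never dropping more than 10% below value.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value

assert make_divisible(512 * 0.25) == 128  # the width_multiplier=0.25 case used above
assert make_divisible(100) == 104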
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCamelCase ( lowercase_ ): lowercase = ['image_processor', 'feature_extractor'] lowercase = 'TvltImageProcessor' lowercase = 'TvltFeatureExtractor' def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' super().__init__(image_processor=__UpperCamelCase ,feature_extractor=__UpperCamelCase ) lowercase_ : int = image_processor lowercase_ : str = feature_extractor def __call__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=False ,__UpperCamelCase=False ,*__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.' ) lowercase_ : Dict = None if images is not None: lowercase_ : int = self.image_processor(__UpperCamelCase ,mask_pixel=__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) if images_mixed is not None: lowercase_ : str = self.image_processor(__UpperCamelCase ,is_mixed=__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) if audio is not None: lowercase_ : Optional[Any] = self.feature_extractor( __UpperCamelCase ,*__UpperCamelCase ,sampling_rate=__UpperCamelCase ,mask_audio=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Union[str, Any] = {} if audio is not None: output_dict.update(__UpperCamelCase ) if images is not None: output_dict.update(__UpperCamelCase ) if images_mixed_dict is not None: output_dict.update(__UpperCamelCase ) return output_dict @property def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : str = self.image_processor.model_input_names lowercase_ : str = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
0
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class UpperCamelCase ( lowercase_ ): lowercase = 'M-CLIP' def __init__( self ,__UpperCamelCase=1024 ,__UpperCamelCase=768 ,**__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : List[str] = transformerDimSize lowercase_ : List[Any] = imageDimSize super().__init__(**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = MCLIPConfig def __init__( self ,__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' super().__init__(__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : str = XLMRobertaModel(__UpperCamelCase ) lowercase_ : Tuple = torch.nn.Linear( in_features=config.transformerDimensions ,out_features=config.numDims ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.transformer(input_ids=__UpperCamelCase ,attention_mask=__UpperCamelCase )[0] lowercase_ : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__UpperCamelCase ), embs
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class UpperCamelCase ( lowercase_ ): lowercase = 'instructblip_vision_model' def __init__( self ,__UpperCamelCase=1408 ,__UpperCamelCase=6144 ,__UpperCamelCase=39 ,__UpperCamelCase=16 ,__UpperCamelCase=224 ,__UpperCamelCase=14 ,__UpperCamelCase="gelu" ,__UpperCamelCase=1e-6 ,__UpperCamelCase=0.0 ,__UpperCamelCase=1e-10 ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : Union[str, Any] = hidden_size lowercase_ : Optional[Any] = intermediate_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : int = patch_size lowercase_ : List[Any] = image_size lowercase_ : Optional[int] = initializer_range lowercase_ : List[Any] = attention_dropout lowercase_ : Dict = layer_norm_eps lowercase_ : List[str] = hidden_act lowercase_ : Optional[int] = qkv_bias @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCamelCase ) lowercase_ : str = cls.get_config_dict(__UpperCamelCase ,**__UpperCamelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": lowercase_ : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCamelCase ,**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = 'instructblip_qformer' def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase="absolute" ,__UpperCamelCase=2 ,__UpperCamelCase=1408 ,**__UpperCamelCase ,) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : str = vocab_size lowercase_ : int = hidden_size lowercase_ : List[str] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = intermediate_size lowercase_ : Union[str, Any] = hidden_dropout_prob lowercase_ : int = attention_probs_dropout_prob lowercase_ : Optional[Any] = max_position_embeddings lowercase_ : List[Any] = initializer_range lowercase_ : Any = layer_norm_eps lowercase_ : Tuple = position_embedding_type lowercase_ : Optional[Any] = cross_attention_frequency lowercase_ : Union[str, Any] = encoder_hidden_size @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCamelCase ) lowercase_ : List[str] = cls.get_config_dict(__UpperCamelCase ,**__UpperCamelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": lowercase_ : Union[str, Any] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCamelCase ,**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = 'instructblip' lowercase = True def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=32 ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' super().__init__(**__UpperCamelCase ) if vision_config is None: lowercase_ : Optional[Any] = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: lowercase_ : List[str] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: lowercase_ : Union[str, Any] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) lowercase_ : Union[str, Any] = InstructBlipVisionConfig(**__UpperCamelCase ) lowercase_ : Any = InstructBlipQFormerConfig(**__UpperCamelCase ) lowercase_ : Dict = text_config['model_type'] if 'model_type' in text_config else 'opt' lowercase_ : Any = CONFIG_MAPPING[text_model_type](**__UpperCamelCase ) lowercase_ : List[Any] = self.text_config.tie_word_embeddings lowercase_ : Optional[int] = self.text_config.is_encoder_decoder lowercase_ : Tuple = num_query_tokens lowercase_ : Any = self.vision_config.hidden_size lowercase_ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowercase_ : List[Any] = 1.0 lowercase_ : List[Any] = 0.02 @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = copy.deepcopy(self.__dict__ ) lowercase_ : Union[str, Any] = self.vision_config.to_dict() lowercase_ : Tuple = self.qformer_config.to_dict() lowercase_ : str = self.text_config.to_dict() lowercase_ : List[Any] = self.__class__.model_type return output
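A hedged composition sketch for the configs above, assuming the class names follow the upstream originals (InstructBlipVisionConfig, InstructBlipQFormerConfig, InstructBlipConfig):
vision = InstructBlipVisionConfig()
qformer = InstructBlipQFormerConfig(encoder_hidden_size=vision.hidden_size)
config = InstructBlipConfig(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())
print(config.num_query_tokens, config.qformer_config.encoder_hidden_size)  # 32 1408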
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
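A minimal sketch of the fast tokenizer above, assuming it is exposed as BlenderbotTokenizerFast; the checkpoint comes from this file's own pretrained map:
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" How are you?").input_ids
print(tok.decode(ids))   # build_inputs_with_special_tokens appends a single </s>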
321
0
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowercase__( __SCREAMING_SNAKE_CASE : str = "laptop" ): lowercase_ : int = F'''https://www.amazon.in/laptop/s?k={product}''' lowercase_ : str = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } lowercase_ : str = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).text ) # Initialize a Pandas dataframe with the column titles lowercase_ : List[str] = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: lowercase_ : Dict = item.ha.text lowercase_ : Union[str, Any] = 'https://www.amazon.in/' + item.ha.a['href'] lowercase_ : Optional[int] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: lowercase_ : Tuple = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: lowercase_ : Dict = 'Not available' try: lowercase_ : Dict = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: lowercase_ : Optional[int] = '' try: lowercase_ : Dict = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 1_00 ) except ValueError: lowercase_ : Any = float('nan' ) except AttributeError: pass lowercase_ : Optional[int] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowercase_ : List[Any] = ' ' lowercase_ : Tuple = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": __SCREAMING_SNAKE_CASE ="headphones" get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=64 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = parent lowercase_ : List[str] = batch_size lowercase_ : Union[str, Any] = seq_length lowercase_ : str = is_training lowercase_ : Optional[Any] = use_input_mask lowercase_ : List[Any] = use_token_type_ids lowercase_ : Dict = use_labels lowercase_ : Optional[Any] = vocab_size lowercase_ : Optional[int] = hidden_size lowercase_ : List[str] = embedding_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : Tuple = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : Optional[int] = attention_probs_dropout_prob lowercase_ : str = max_position_embeddings lowercase_ : Optional[Any] = type_vocab_size lowercase_ : Dict = type_sequence_label_size lowercase_ : int = initializer_range lowercase_ : Any = num_labels lowercase_ : int = num_choices lowercase_ : str = scope def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Union[str, Any] = None if self.use_token_type_ids: lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase_ : Tuple = None lowercase_ : Dict = None lowercase_ : List[str] = None if self.use_labels: lowercase_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size 
,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = MobileBertModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ,token_type_ids=__UpperCamelCase ) lowercase_ : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Tuple = MobileBertForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = MobileBertForNextSentencePrediction(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = MobileBertForPreTraining(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,next_sentence_label=__UpperCamelCase ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Tuple = MobileBertForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,start_positions=__UpperCamelCase ,end_positions=__UpperCamelCase ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase 
,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.num_labels lowercase_ : Optional[int] = MobileBertForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = self.num_labels lowercase_ : Optional[int] = MobileBertForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : int = self.num_choices lowercase_ : Tuple = MobileBertForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.prepare_config_and_inputs() ( lowercase_ ) : List[str] = config_and_inputs lowercase_ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': MobileBertModel, 'fill-mask': MobileBertForMaskedLM, 'question-answering': MobileBertForQuestionAnswering, 'text-classification': MobileBertForSequenceClassification, 'token-classification': MobileBertForTokenClassification, 'zero-shot': MobileBertForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> Dict: '''simple docstring''' lowercase_ : int = super()._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): lowercase_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=__UpperCamelCase ) lowercase_ : Optional[int] = torch.zeros( self.model_tester.batch_size 
,dtype=torch.long ,device=__UpperCamelCase ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = MobileBertModelTester(self ) lowercase_ : Any = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return torch.tensor( __SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE =1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(__UpperCamelCase ) lowercase_ : int = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowercase_ : List[Any] = model(__UpperCamelCase )[0] lowercase_ : Union[str, Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ] ,device=__UpperCamelCase ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. 
We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase_ : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
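The ratio trick from that closing comment, shown in isolation; the sample values are illustrative only:
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 2.5e0, -3.0e-1])
actual = expected * (1 + 5e-4)    # simulate a 0.05% relative error
ratio = expected / actual         # stays near 1 regardless of magnitude
assert torch.all((ratio >= 1 - TOLERANCE) & (ratio <= 1 + TOLERANCE))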
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem __SCREAMING_SNAKE_CASE =importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 __SCREAMING_SNAKE_CASE =[ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__( __SCREAMING_SNAKE_CASE : str ): if "://" in dataset_path: lowercase_ : List[Any] = dataset_path.split('://' )[1] return dataset_path def lowercase__( __SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem ): if fs is not None and fs.protocol != "file": return True else: return False def lowercase__( __SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): lowercase_ : Optional[int] = not is_remote_filesystem(__SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__SCREAMING_SNAKE_CASE ) , fs._strip_protocol(__SCREAMING_SNAKE_CASE ) ) else: fs.mv(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , recursive=__SCREAMING_SNAKE_CASE ) def lowercase__( ): if hasattr(fsspec.asyn , 'reset_lock' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: lowercase_ : str = None lowercase_ : Dict = None lowercase_ : Optional[Any] = threading.Lock()
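A tiny check of the path helpers above, assuming they keep their upstream names extract_path_from_uri and is_remote_filesystem:
import fsspec

assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
assert extract_path_from_uri("/local/dataset") == "/local/dataset"
assert not is_remote_filesystem(fsspec.filesystem("file"))   # local fs, protocol "file"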
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less than current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __SCREAMING_SNAKE_CASE ={ "yjernite/retribert-base-uncased": 512, } __SCREAMING_SNAKE_CASE ={ "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = RetriBertTokenizer lowercase = ['input_ids', 'attention_mask'] def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : List[str] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : Tuple = do_lower_case lowercase_ : List[Any] = strip_accents lowercase_ : Dict = tokenize_chinese_chars lowercase_ : Any = normalizer_class(**__UpperCamelCase ) lowercase_ : str = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int: '''simple docstring''' lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Dict = [self.sep_token_id] lowercase_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : str = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
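# Hedged usage sketch for CLIPSegProcessor outside the test harness above. The
# checkpoint name and image URL are assumptions, not taken from these tests.
import requests
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text goes through the tokenizer, images through the image processor.
inputs = processor(text=["a cat", "a remote"], images=image, padding=True, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']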
321
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : list ): if not grid or not grid[0]: raise TypeError('The grid does not contain the appropriate information' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowercase_ : int = grid[0] for row_n in range(1 , len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : Any = grid[row_n] lowercase_ : Dict = fill_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = grid[row_n] return grid[-1][-1] def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(__SCREAMING_SNAKE_CASE ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) @dataclass class __lowerCamelCase ( lowercase_ ): lowercase = field( default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} ) lowercase = field(default=lowercase_ , metadata={'help': 'Whether to SortishSamler or not.'} ) lowercase = field( default=lowercase_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) lowercase = field(default=lowercase_ , metadata={'help': 'whether to use adafactor'} ) lowercase = field( default=lowercase_ , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} ) lowercase = field( default=lowercase_ , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} ) lowercase = field(default=lowercase_ , metadata={'help': 'Dropout probability. Goes into model.config.'} ) lowercase = field( default=lowercase_ , metadata={'help': 'Attention dropout probability. Goes into model.config.'} ) lowercase = field( default='linear' , metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
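# Hedged sketch pairing the encoder and decoder these tests exercise. The
# checkpoint name comes from the integration tests above; loading it into the
# decoder with is_decoder/add_cross_attention set is an assumption, and the LM
# head is randomly initialized for that checkpoint, so the logits are
# illustrative only.
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder

encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
decoder = BertGenerationDecoder.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder", is_decoder=True, add_cross_attention=True
)

input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
    encoder_hidden_states = encoder(input_ids).last_hidden_state
    logits = decoder(input_ids, encoder_hidden_states=encoder_hidden_states).logits
print(encoder_hidden_states.shape, logits.shape)  # (1, 8, 1024) and (1, 8, vocab_size)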
321
0
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
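# Hedged sketch of the export helpers the tests above cover; the model name,
# opset, and output path are placeholders, and quantize writes a
# "-quantized.onnx" sibling file next to the exported model.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

output = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=output, opset=12)

quantized_path = quantize(output)
print(quantized_path)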
321
0