code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipaConfig,
    BlipaForConditionalGeneration,
    BlipaProcessor,
    BlipaVisionConfig,
    BlipImageProcessor,
    OPTConfig,
    TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download the LAVIS demo 'merlion' image and return it as an RGB PIL image."""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Build (source, destination) pairs mapping original LAVIS weight names to HF names."""
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Recombine the original separate q/v attention biases into one qkv bias per vision layer.

    The original checkpoint has no k bias, so zeros are inserted for the k slot.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name, eos_token_id):
    """Return (config, image_size) for the given checkpoint name.

    COCO-finetuned checkpoints use 364x364 inputs, all others 224x224.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size


@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the HF format, verify outputs, and optionally save/push."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
701
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Return the value of the Gaussian (normal) probability density function at ``x``.

    :param x: point at which to evaluate the density
    :param mu: mean of the distribution (default 0.0)
    :param sigma: standard deviation of the distribution (default 1.0)

    >>> round(float(gaussian(0)), 6)
    0.398942
    >>> round(float(gaussian(2, 2, 3)), 6)
    0.132981
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..row_size**2 in row-major order.

    A zero size falls back to the default of 4; negative sizes are taken absolute.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (i.e. 90 clockwise)."""
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Mirror the matrix across its main diagonal."""
    return [list(column) for column in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse the order of the rows (flip vertically)."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse every row (flip horizontally)."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print one row per line, values space-separated."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
702
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A_ : _A :int _A :int class A_ : def __init__( self : List[str] , snake_case__ : int ): lowercase = [[] for _ in range(snake_case__ )] lowercase = size def __getitem__( self : Optional[int] , snake_case__ : int ): return iter(self._graph[vertex] ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): return self._size def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int ): lowercase = deque([start_vertex] ) lowercase = [None] * self.size lowercase = 0 while queue: lowercase = queue.popleft() lowercase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowercase = current_distance + edge.weight lowercase = distances[edge.destination_vertex] if ( isinstance(snake_case__ , snake_case__ ) and new_distance >= dest_vertex_distance ): continue lowercase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
72
0
# fmt: off
# International Morse code table (ITU-R M.1677-1), plus a few common extensions.
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--",
    " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Inverse mapping used for decryption.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode a message into Morse code; letters are upper-cased, words separated by '/'."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a space-separated Morse code string back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a demo message through encrypt and decrypt, printing each stage."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
703
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class for a Data2VecAudio model.

    Stores the transformer encoder hyper-parameters, the convolutional feature
    extractor layout, SpecAugment masking settings, CTC loss options, the
    optional adapter, and the sequence-classification / x-vector heads.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # the three conv layout lists must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # overall downsampling factor of the conv feature extractor
        return math.prod(self.conv_stride)
72
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the original class name was mangled in the source dump; confirm
# the intended public name against the library's __init__ exports.
class SimpleImageProcessor(BaseImageProcessor):
    """Image processor that resizes on the shortest edge, center-crops, rescales and normalizes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one or more images and return a BatchFeature.

        Explicit arguments override the defaults captured at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
704
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF counterpart (in-place)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight (tied lm_head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and return the equivalent XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes everything with "decoder"; HF uses "model"
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
72
0
"""Tests for the Kandinsky 2.2 controlnet image-to-image pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetImgaImgPipeline,
    KandinskyVaaPriorEmbaEmbPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests using tiny randomly-initialized components."""

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet wired for the image+hint conditioning used by controlnet."""
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because it predicts mean and variance.
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        """Assemble the minimal {unet, scheduler, movq} dict the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        return {"unet": unet, "scheduler": scheduler, "movq": movq}

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic set of call kwargs for the pipeline on *device*."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # Create the init image from a random tensor.
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # Create the (depth-style) hint.
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published checkpoints."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
705
"""Binary-search helpers: pure-Python implementations plus stdlib ``bisect`` wrappers."""

from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which *item* can be inserted keeping order.

    Args:
        sorted_collection: ascending-sorted list to search.
        item: value to place.
        lo: lower bound of the search slice.
        hi: upper bound of the search slice; ``-1`` means ``len(sorted_collection)``.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2  # avoids overflow in languages with fixed ints
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which *item* can be inserted keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # <= (instead of <) places equal items to the left of the insertion point.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* before any existing equal entries, keeping the list sorted."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* after any existing equal entries, keeping the list sorted."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iteratively search *sorted_collection* for *item*.

    Returns:
        The index of *item*, or ``None`` if it is not present.
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        if item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Search using the standard library's ``bisect.bisect_left``."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursively search ``sorted_collection[left:right + 1]`` for *item*."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
72
0
"""Auto-model classes and name mappings for the Flax backend."""

from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy mappings resolve model-type strings to classes only on first access.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
706
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
"""Sandboxed execution of untrusted generated code for pass/fail scoring."""

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run *check_program* in a subprocess and report whether it passed.

    Args:
        check_program: source code (program + tests) to execute.
        timeout: per-program wall-clock limit in seconds.
        task_id: identifier echoed into the result dict.
        completion_id: identifier echoed into the result dict.

    Returns:
        A dict with ``task_id``, ``passed``, ``result`` and ``completion_id``.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    # Give the child slightly longer than its internal limit before killing it.
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """Execute *check_program* under the sandbox; append the outcome to *result*.

    Must run in a throwaway subprocess: ``reliability_guard`` destroys the
    interpreter's ability to do I/O and process control.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise ``TimeoutException`` if the body runs longer than *seconds*."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable inside the body."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """Run the body inside a fresh temporary working directory."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised by ``time_limit`` when the wall-clock budget is exhausted."""


class WriteOnlyStringIO(io.StringIO):
    """A StringIO that accepts writes but raises on every read path."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        # Returning False makes wrappers treat the stream as write-only.
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to *root*."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive interpreter facilities before running untrusted code.

    WARNING: this function is NOT a security sandbox. Untrusted code should
    still be run inside an isolated container. It merely prevents accidental
    damage (deleting files, forking, killing processes) from generated code.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
0
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : Optional[int] =logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str =tf.data.AUTOTUNE def UpperCamelCase__ ( ): lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" ,type=lowerCAmelCase__ ,default="""roberta-base""" ,help="""The model config to use. Note that we don\'t copy the model\'s weights, only the config!""" ,) parser.add_argument( """--tokenizer""" ,type=lowerCAmelCase__ ,default="""unigram-tokenizer-wikitext""" ,help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.""" ,) parser.add_argument( """--per_replica_batch_size""" ,type=lowerCAmelCase__ ,default=8 ,help="""Batch size per TPU core.""" ,) parser.add_argument( """--no_tpu""" ,action="""store_true""" ,help="""If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.""" ,) parser.add_argument( """--tpu_name""" ,type=lowerCAmelCase__ ,help="""Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.""" ,default="""local""" ,) parser.add_argument( """--tpu_zone""" ,type=lowerCAmelCase__ ,help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" ,) parser.add_argument( """--gcp_project""" ,type=lowerCAmelCase__ ,help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" ,action="""store_true""" ,help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" ,) parser.add_argument( """--train_dataset""" ,type=lowerCAmelCase__ ,help="""Path to training dataset to load. 
If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" ,) parser.add_argument( """--shuffle_buffer_size""" ,type=lowerCAmelCase__ ,default=2**18 ,help="""Size of the shuffle buffer (in samples)""" ,) parser.add_argument( """--eval_dataset""" ,type=lowerCAmelCase__ ,help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" ,) parser.add_argument( """--num_epochs""" ,type=lowerCAmelCase__ ,default=1 ,help="""Number of epochs to train for.""" ,) parser.add_argument( """--learning_rate""" ,type=lowerCAmelCase__ ,default=1E-4 ,help="""Learning rate to use for training.""" ,) parser.add_argument( """--weight_decay_rate""" ,type=lowerCAmelCase__ ,default=1E-3 ,help="""Weight decay rate to use for training.""" ,) parser.add_argument( """--max_length""" ,type=lowerCAmelCase__ ,default=512 ,help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" ,) parser.add_argument( """--mlm_probability""" ,type=lowerCAmelCase__ ,default=0.15 ,help="""Fraction of tokens to mask during training.""" ,) parser.add_argument("""--output_dir""" ,type=lowerCAmelCase__ ,required=lowerCAmelCase__ ,help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" ,type=lowerCAmelCase__ ,help="""Model ID to upload to on the Hugging Face Hub.""" ) lowercase = parser.parse_args() return args def UpperCamelCase__ ( lowerCAmelCase__ ): try: if args.tpu_name: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name ,zone=args.tpu_zone ,project=args.gcp_project ) else: lowercase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. 
When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(lowerCAmelCase__ ) tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ ) return tpu def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = 0 for file in file_list: lowercase = file.split("""/""" )[-1] lowercase = re.search(r"""-\d+-(\d+)\.tfrecord""" ,lowerCAmelCase__ ).group(1 ) lowercase = int(lowerCAmelCase__ ) num_samples += sample_count return num_samples def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ): lowercase = count_samples(lowerCAmelCase__ ) lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ ) if shuffle: lowercase = dataset.shuffle(len(lowerCAmelCase__ ) ) lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ ,num_parallel_reads=lowerCAmelCase__ ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) ) lowercase = dataset.map(lowerCAmelCase__ ,num_parallel_calls=lowerCAmelCase__ ) if shuffle: assert shuffle_buffer_size is not None lowercase = dataset.shuffle(args.shuffle_buffer_size ) lowercase = dataset.batch(lowerCAmelCase__ ,drop_remainder=lowerCAmelCase__ ) lowercase = dataset.map(lowerCAmelCase__ ,num_parallel_calls=lowerCAmelCase__ ) lowercase = dataset.prefetch(lowerCAmelCase__ ) return dataset def UpperCamelCase__ ( lowerCAmelCase__ ): if not args.no_tpu: lowercase = initialize_tpu(lowerCAmelCase__ ) lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ ) else: lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) lowercase = AutoTokenizer.from_pretrained(args.tokenizer ) lowercase = AutoConfig.from_pretrained(args.pretrained_model_config ) lowercase = tokenizer.vocab_size lowercase = 
tf.io.gfile.glob(os.path.join(args.train_dataset ,"""*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset ,"""*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) lowercase = count_samples(lowerCAmelCase__ ) lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) lowercase = steps_per_epoch * args.num_epochs with strategy.scope(): lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built lowercase = create_optimizer( num_train_steps=lowerCAmelCase__ ,num_warmup_steps=total_train_steps // 20 ,init_lr=args.learning_rate ,weight_decay_rate=args.weight_decay_rate ,) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=lowerCAmelCase__ ,metrics=["""accuracy"""] ) def decode_fn(lowerCAmelCase__ ): lowercase = { 'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ), 'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa ,shape=(args.max_length,) ), } return tf.io.parse_single_example(lowerCAmelCase__ ,lowerCAmelCase__ ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. 
lowercase = DataCollatorForLanguageModeling( tokenizer=lowerCAmelCase__ ,mlm_probability=args.mlm_probability ,mlm=lowerCAmelCase__ ,return_tensors="""tf""" ) def mask_with_collator(lowerCAmelCase__ ): # TF really needs an isin() function lowercase = ( ~tf.cast(batch["""attention_mask"""] ,tf.bool ) | (batch['input_ids'] == tokenizer.cls_token_id) | (batch['input_ids'] == tokenizer.sep_token_id) ) lowercase = data_collator.tf_mask_tokens( batch["""input_ids"""] ,vocab_size=len(lowerCAmelCase__ ) ,mask_token_id=tokenizer.mask_token_id ,special_tokens_mask=lowerCAmelCase__ ,) return batch lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync lowercase = prepare_dataset( lowerCAmelCase__ ,decode_fn=lowerCAmelCase__ ,mask_fn=lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,shuffle=lowerCAmelCase__ ,shuffle_buffer_size=args.shuffle_buffer_size ,) lowercase = prepare_dataset( lowerCAmelCase__ ,decode_fn=lowerCAmelCase__ ,mask_fn=lowerCAmelCase__ ,batch_size=lowerCAmelCase__ ,shuffle=lowerCAmelCase__ ,) lowercase = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir ,hub_model_id=args.hub_model_id ,tokenizer=lowerCAmelCase__ ) ) model.fit( lowerCAmelCase__ ,validation_data=lowerCAmelCase__ ,epochs=args.num_epochs ,callbacks=lowerCAmelCase__ ,) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Tuple =parse_args() main(args)
707
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( __a ): _A :Optional[int] = ['''image_processor''', '''tokenizer'''] _A :Tuple = '''BlipImageProcessor''' _A :List[Any] = '''AutoTokenizer''' def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Dict ): lowercase = False super().__init__(snake_case__ , snake_case__ ) lowercase = self.image_processor def __call__( self : List[str] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowercase = self.tokenizer lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , 
) return text_encoding # add pixel_values lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : int , **snake_case__ : int ): return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
72
0
import warnings from ..trainer import Trainer from ..utils import logging __SCREAMING_SNAKE_CASE : Tuple =logging.get_logger(__name__) class A_ ( __A ): def __init__( self : Any , snake_case__ : Any=None , **snake_case__ : List[Any] ): warnings.warn( """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """ """instead.""" , snake_case__ , ) super().__init__(args=snake_case__ , **snake_case__ )
708
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', 
'''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) __SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def UpperCamelCase__ ( lowerCAmelCase__ ): for module_name, extractors in 
FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: lowercase = model_type_to_module_name(lowerCAmelCase__ ) lowercase = importlib.import_module(f""".{module_name}""" ,"""transformers.models""" ) try: return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(lowerCAmelCase__ ,"""__name__""" ,lowerCAmelCase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowercase = importlib.import_module("""transformers""" ) if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ): return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,**lowerCAmelCase__ ,): lowercase = get_file_from_repo( lowerCAmelCase__ ,lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,use_auth_token=lowerCAmelCase__ ,revision=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(lowerCAmelCase__ ,encoding="""utf-8""" ) as reader: return json.load(lowerCAmelCase__ ) class A_ : def __init__( self : List[Any] ): raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( cls : Dict , snake_case__ : Tuple , **snake_case__ : int ): 
lowercase = kwargs.pop("""config""" , snake_case__ ) lowercase = kwargs.pop("""trust_remote_code""" , snake_case__ ) lowercase = True lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ ) lowercase = config_dict.get("""feature_extractor_type""" , snake_case__ ) lowercase = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(snake_case__ , snake_case__ ): lowercase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ ) # It could be in `config.feature_extractor_type`` lowercase = getattr(snake_case__ , """feature_extractor_type""" , snake_case__ ) if hasattr(snake_case__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: lowercase = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: lowercase = feature_extractor_class_from_name(snake_case__ ) lowercase = feature_extractor_auto_map is not None lowercase = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING lowercase = resolve_trust_remote_code( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if has_remote_code and trust_remote_code: lowercase = get_class_from_dynamic_module( snake_case__ , snake_case__ , **snake_case__ ) lowercase = kwargs.pop("""code_revision""" , snake_case__ ) if os.path.isdir(snake_case__ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING: lowercase = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )] return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Optional[int] , snake_case__ : List[str] ): FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
72
0
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __SCREAMING_SNAKE_CASE : Optional[Any] =logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : Union[str, Any] ='pytorch_model.bin' @dataclasses.dataclass class A_ : _A = dataclasses.field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} ) _A = dataclasses.field( default=__a , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , ) @dataclasses.dataclass class A_ : _A = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} ) _A = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} ) _A = dataclasses.field( default=__a , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) _A = dataclasses.field( default=__a , metadata={'''help''': '''The name of the task to train on.'''} , ) _A = dataclasses.field( default=__a , metadata={'''help''': '''The list of labels for the task.'''} ) @dataclasses.dataclass class A_ : _A = dataclasses.field( metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} ) _A = dataclasses.field( default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} ) _A = dataclasses.field( default='''no''' , metadata={ '''help''': '''The evaluation strategy to adopt during training. 
Possible values are: ["no", "step", "epoch]''' } , ) _A = dataclasses.field( default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _A = dataclasses.field( default=0.0 , metadata={ '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.''' } , ) _A = dataclasses.field( default=__a , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , ) _A = dataclasses.field( default=__a , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , ) _A = dataclasses.field( default=__a , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , ) _A = dataclasses.field( default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , ) _A = dataclasses.field( default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) _A = dataclasses.field( default=__a , metadata={'''help''': '''Random seed for initialization.'''} , ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 ) if args.do_filter_by_confidence: lowercase = dataset.filter(lambda lowerCAmelCase__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 lowercase = int(eval_result * len(__lowerCAmelCase ) ) print(__lowerCAmelCase ) lowercase = dataset.sort("""probability""" ,reverse=__lowerCAmelCase ) lowercase = dataset.select(range(__lowerCAmelCase ) ) lowercase = dataset.remove_columns(["""label""", """probability"""] ) lowercase = dataset.rename_column("""prediction""" ,"""label""" ) lowercase = dataset.map(lambda 
lowerCAmelCase__ : {"label": idalabel[example["label"]]} ) lowercase = dataset.shuffle(seed=args.seed ) lowercase = os.path.join(__lowerCAmelCase ,f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(__lowerCAmelCase ,index=__lowerCAmelCase ) else: dataset.to_json(__lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ): lowercase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() lowercase = STModelArguments(model_name_or_path=__lowerCAmelCase ) lowercase = STDataArguments(train_file=__lowerCAmelCase ,infer_file=__lowerCAmelCase ) lowercase = STTrainingArguments(output_dir=__lowerCAmelCase ) lowercase = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__lowerCAmelCase ).items(): setattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for key, value in kwargs.items(): if hasattr(__lowerCAmelCase ,__lowerCAmelCase ): setattr(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) # Sanity checks lowercase = {} lowercase = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None lowercase = args.train_file lowercase = 
args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None lowercase = args.eval_file for key in data_files: lowercase = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: lowercase = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) lowercase = f"""{args.output_dir}/self-train_iter-{{}}""".format lowercase = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir ,exist_ok=__lowerCAmelCase ) os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase ) accelerator.wait_for_everyone() lowercase = None lowercase = None lowercase = 0 lowercase = False # Show the progress bar lowercase = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 ,int(args.max_selftrain_iterations ) ): lowercase = data_dir_format(__lowerCAmelCase ) assert os.path.exists(__lowerCAmelCase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 lowercase = os.path.join(__lowerCAmelCase ,"""stage-1""" ) lowercase = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": 
data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__lowerCAmelCase ,__lowerCAmelCase ): arguments_dict.update({key: value} ) lowercase = os.path.join(__lowerCAmelCase ,"""best-checkpoint""" ,__lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" ,__lowerCAmelCase ,__lowerCAmelCase ,) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" ,__lowerCAmelCase ) finetune(**__lowerCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" ,__lowerCAmelCase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data lowercase = os.path.join(__lowerCAmelCase ,"""best-checkpoint""" ) lowercase = os.path.join(__lowerCAmelCase ,"""stage-2""" ) # Update arguments_dict lowercase = model_path lowercase = data_files["""train"""] lowercase = current_output_dir lowercase = os.path.join(__lowerCAmelCase ,"""best-checkpoint""" ,__lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" ,__lowerCAmelCase ,__lowerCAmelCase ,) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" ,__lowerCAmelCase ) finetune(**__lowerCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" ,__lowerCAmelCase ) lowercase = iteration lowercase = data_dir_format(iteration + 1 ) lowercase = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase ,"""best-checkpoint""" ) ) lowercase = config.idalabel lowercase = os.path.join(__lowerCAmelCase ,"""eval_results_best-checkpoint.json""" ) lowercase = os.path.join(__lowerCAmelCase ,"""test_results_best-checkpoint.json""" ) assert os.path.exists(__lowerCAmelCase ) with open(__lowerCAmelCase ,"""r""" ) as f: lowercase = float(json.load(__lowerCAmelCase )[args.eval_metric] ) lowercase = os.path.join(__lowerCAmelCase ,"""infer_output_best-checkpoint.csv""" ) assert os.path.exists(__lowerCAmelCase ) # Loading the dataset from local csv or json files. 
lowercase = load_dataset(args.data_file_extension ,data_files={"""data""": data_files["""infer"""]} )["""data"""] lowercase = load_dataset("""csv""" ,data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase ) shutil.copy(__lowerCAmelCase ,os.path.join(__lowerCAmelCase ,f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(__lowerCAmelCase ): shutil.copy(__lowerCAmelCase ,os.path.join(__lowerCAmelCase ,f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) accelerator.wait_for_everyone() lowercase = os.path.join(__lowerCAmelCase ,f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: lowercase = eval_result if best_iteration is None: lowercase = new_iteration lowercase = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: lowercase = new_iteration lowercase = new_eval_result lowercase = 0 else: if new_eval_result == best_eval_result: lowercase = new_iteration lowercase = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: lowercase = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" ,__lowerCAmelCase ) logger.info("""Best evaluation result: %s = %f""" ,args.eval_metric ,__lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCAmelCase ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(__lowerCAmelCase ,"""eval_results_best-iteration.json""" ) ,) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" ,args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" 
,args.eval_metric ,__lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCAmelCase ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(__lowerCAmelCase ,"""eval_results_best-iteration.json""" ) ,)
709
# ============================================================================
# NOTE(review): fairseq -> Hugging Face SpeechT5 checkpoint conversion script.
# Visible structure: rename-mapping dicts (speech/text encoder prenets,
# encoder/decoder layers, speech/text decoder pre/postnets), per-task
# ignore-key lists, a recursive weight setter, a conv-feature-extractor
# loader, and an argparse-driven @torch.no_grad() conversion entry point for
# tasks "s2t", "t2s" and "s2s".
# NOTE(review): this chunk has been flattened onto single physical lines and
# machine-renamed: every local is `lowercase` and every module-level constant
# is `__SCREAMING_SNAKE_CASE`, so successive assignments clobber each other,
# and names referenced later (e.g. `MAPPING` in the weight-loading loop, the
# per-task mapping/ignore tables) no longer resolve to distinct objects.
# The code is preserved byte-for-byte below; it must be de-obfuscated against
# the upstream SpeechT5 conversion script before it can run — TODO confirm
# each restored name against that upstream file.
# ============================================================================
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Any =logging.get_logger('''transformers.models.speecht5''') __SCREAMING_SNAKE_CASE : Optional[Any] ={ '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } __SCREAMING_SNAKE_CASE : Union[str, Any] ={ '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } __SCREAMING_SNAKE_CASE : Optional[int] ={ '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } __SCREAMING_SNAKE_CASE : List[Any] ={ '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', 
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } __SCREAMING_SNAKE_CASE : List[Any] ={ '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } __SCREAMING_SNAKE_CASE : Optional[Any] ={ '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } __SCREAMING_SNAKE_CASE : Optional[int] ={ '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': 
'''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } __SCREAMING_SNAKE_CASE : List[Any] ={ '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } __SCREAMING_SNAKE_CASE : List[Any] ={ **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __SCREAMING_SNAKE_CASE : List[str] ={ **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __SCREAMING_SNAKE_CASE : 
Optional[int] ={ **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __SCREAMING_SNAKE_CASE : Dict =[] __SCREAMING_SNAKE_CASE : List[str] =[ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] __SCREAMING_SNAKE_CASE : List[str] =IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] __SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] __SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): for attribute in key.split(""".""" ): lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) if weight_type is not None: lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ ).shape else: lowercase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase = value elif weight_type == "weight_g": lowercase = value elif weight_type == "weight_v": lowercase = value elif weight_type == "bias": lowercase = value elif weight_type == "running_mean": lowercase = value elif weight_type == "running_var": lowercase = value elif weight_type == "num_batches_tracked": lowercase = value else: lowercase = value logger.info(f"""{key + ("." 
+ weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowercase , lowercase = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [] if task == "s2t": lowercase = hf_model.speechta.encoder.prenet.feature_encoder lowercase = MAPPING_S2T lowercase = IGNORE_KEYS_S2T elif task == "t2s": lowercase = None lowercase = MAPPING_T2S lowercase = IGNORE_KEYS_T2S elif task == "s2s": lowercase = hf_model.speechta.encoder.prenet.feature_encoder lowercase = MAPPING_S2S lowercase = IGNORE_KEYS_S2S else: raise ValueError(f"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCAmelCase__ ,lowerCAmelCase__ ): logger.info(f"""{name} was ignored""" ) continue lowercase = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,hf_model.config.feat_extract_norm == """group""" ,) lowercase = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: lowercase , lowercase = key.split(""".*.""" ) if prefix in name and suffix in name: lowercase = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: lowercase = True if "*" in mapped_key: lowercase = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2] lowercase = mapped_key.replace("""*""" ,lowerCAmelCase__ ) if "weight_g" in name: lowercase = """weight_g""" elif "weight_v" in name: lowercase = """weight_v""" elif "bias" in name: lowercase = """bias""" elif "weight" in name: lowercase = """weight""" elif "running_mean" in name: lowercase = """running_mean""" elif "running_var" in name: lowercase = """running_var""" elif "num_batches_tracked" in name: lowercase = """num_batches_tracked""" else: lowercase = None set_recursively(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = full_name.split("""conv_layers.""" )[-1] lowercase = name.split(""".""" ) lowercase = int(items[0] ) lowercase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat 
extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,): if config_path is not None: lowercase = SpeechTaConfig.from_pretrained(lowerCAmelCase__ ) else: lowercase = SpeechTaConfig() if task == "s2t": lowercase = config.max_text_positions lowercase = SpeechTaForSpeechToText(lowerCAmelCase__ ) elif task == "t2s": lowercase = 1_876 lowercase = 600 lowercase = config.max_speech_positions lowercase = SpeechTaForTextToSpeech(lowerCAmelCase__ ) elif task == "s2s": lowercase = 1_876 lowercase = config.max_speech_positions lowercase = SpeechTaForSpeechToSpeech(lowerCAmelCase__ ) else: raise ValueError(f"""Unknown task name: {task}""" ) if vocab_path: lowercase = SpeechTaTokenizer(lowerCAmelCase__ ,model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it lowercase = AddedToken("""<mask>""" ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) lowercase = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) lowercase = SpeechTaFeatureExtractor() lowercase = SpeechTaProcessor(tokenizer=lowerCAmelCase__ ,feature_extractor=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) lowercase = torch.load(lowerCAmelCase__ ) recursively_load_weights(fairseq_checkpoint["""model"""] ,lowerCAmelCase__ ,lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] =argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
72
0
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort.

    Builds one "hole" per integer in [min(array), max(array)], counts how
    many times each value occurs, then writes the values back in order.
    Runs in O(n + range) time and O(range) extra space.

    Args:
        array: list of integers to sort (may be empty; negatives allowed).

    Returns:
        The same list object, sorted in ascending order.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    """
    if len(array) == 0:
        return array
    # min()/max() are only safe on a non-empty list, hence the guard above.
    _min, _max = min(array), max(array)

    # One hole per distinct possible value in [_min, _max].
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Fill the holes: remember the value and how often it occurs.
    for value in array:
        index = value - _min
        holes[index] = value
        holes_repeat[index] += 1

    # Write the values back into the original list in ascending order.
    write_pos = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[write_pos] = holes[i]
            write_pos += 1
            holes_repeat[i] -= 1
    return array


# Backward-compatible alias: earlier revisions exposed the sort under this
# machine-generated name.
UpperCamelCase__ = pigeon_sort

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
710
import os

# All paths are set with the intent you should run this script from the root
# of the repo with the command:
#   python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    # File listing every path covered by doctests, one per line.
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    # Fail loudly on stale entries so the doctest list stays in sync.
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    # The list is kept alphabetically sorted to make diffs reviewable.
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
72
0
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version

# Comparison operators accepted in pip-style requirement strings.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare `got_ver` against `want_ver` with `op`; raise if unsatisfied.

    Raises:
        ValueError: if either version string is None (cannot compare).
        ImportError: if the installed version does not satisfy the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that an installed package satisfies a pip-style requirement.

    Args:
        requirement: e.g. ``"tokenizers==0.9.4"``, ``"tqdm>=4.27,<5.0"`` or a
            bare package name (presence check only). ``"python"`` is treated
            specially and checked against ``sys.version_info``.
        hint: optional suffix appended to the error message on failure.

    Raises:
        ValueError: malformed requirement string or unknown operator.
        ImportError: installed version does not satisfy the requirement.
        importlib.metadata.PackageNotFoundError: package is not installed.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check: a bare package name means "just ensure installed"
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter's version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper that adds a transformers-core install hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)


# Backward-compatible alias: earlier revisions exposed the last definition
# under this machine-generated name.
UpperCamelCase__ = require_version_core
711
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Always-importable symbols; model classes are appended below only when their
# backend (torch / tf / flax) is actually installed.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
72
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class A_(ProcessorMixin):
    r"""
    Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single
    processor, so text and/or audio inputs can be prepared with one call.
    """

    # Class names used by ProcessorMixin to instantiate the two components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns a BatchEncoding; when both modalities are given, the audio
        features are merged into the text encoding under `input_features`.

        Raises:
            ValueError: if neither `text` nor `audios` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, preserving order, de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
712
# ============================================================================
# NOTE(review): release-versioning utility. Visible structure: regex-based
# VERSION replacement across examples/, src/transformers/__init__.py,
# setup.py and docs (REPLACE_PATTERNS / REPLACE_FILES); a README model-list
# cleaner; version read-back; and pre/post-release entry points driven by
# argparse flags --post_release / --patch.
# NOTE(review): the chunk is flattened onto three physical lines and
# machine-renamed — all helpers are defined under the single name
# `UpperCamelCase__`, so each definition shadows the previous one, while the
# call sites still use the original names (`update_version_in_file`,
# `update_version_in_examples`, `get_version`, `global_version_update`,
# `clean_main_ref_in_model_list`, `pre_release_work`, `post_release_work`),
# none of which exist here. Likewise all locals are `lowercase`, clobbering
# the tuple unpack of REPLACE_PATTERNS entries. Code preserved byte-for-byte;
# restore the real function and variable names (upstream `utils/release.py`)
# before use — TODO confirm against upstream.
# ============================================================================
import argparse import os import re import packaging.version __SCREAMING_SNAKE_CASE : Optional[int] ='''examples/''' __SCREAMING_SNAKE_CASE : Any ={ '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } __SCREAMING_SNAKE_CASE : Union[str, Any] ={ '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } __SCREAMING_SNAKE_CASE : Any ='''README.md''' def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: lowercase = f.read() lowercase , lowercase = REPLACE_PATTERNS[pattern] lowercase = replace.replace("""VERSION""" ,lowerCAmelCase__ ) lowercase = re_pattern.sub(lowerCAmelCase__ ,lowerCAmelCase__ ) with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): for folder, directories, fnames in os.walk(lowerCAmelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ,pattern="""examples""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) if not patch: update_version_in_examples(lowerCAmelCase__ ) def 
UpperCamelCase__ ( ): lowercase = """🤗 Transformers currently provides the following architectures""" lowercase = """1. Want to contribute a new model?""" with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: lowercase = f.readlines() # Find the start of the list. lowercase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowercase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowercase = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,) index += 1 with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(lowerCAmelCase__ ) def UpperCamelCase__ ( ): with open(REPLACE_FILES["""init"""] ,"""r""" ) as f: lowercase = f.read() lowercase = REPLACE_PATTERNS["""init"""][0].search(lowerCAmelCase__ ).groups()[0] return packaging.version.parse(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__=False ): lowercase = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowercase = default_version.base_version elif patch: lowercase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowercase = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowercase = input(f"""Which version are you releasing? 
[{default_version}]""" ) if len(lowerCAmelCase__ ) == 0: lowercase = default_version print(f"""Updating version to {version}.""" ) global_version_update(lowerCAmelCase__ ,patch=lowerCAmelCase__ ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def UpperCamelCase__ ( ): lowercase = get_version() lowercase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowercase = current_version.base_version # Check with the user we got that right. lowercase = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(lowerCAmelCase__ ) == 0: lowercase = dev_version print(f"""Updating version to {version}.""" ) global_version_update(lowerCAmelCase__ ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') __SCREAMING_SNAKE_CASE : Optional[int] =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
72
0
from graphs.minimum_spanning_tree_kruskal import kruskal


def UpperCamelCase__():
    """Regression test: Kruskal's MST on a known 9-node weighted graph.

    Edges are [u, v, weight] triples; the expected MST weight total is the
    classic CLRS example. Order-insensitive comparison via sorted().
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
713
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated original both module constants were collapsed
# onto one name, so the archive map clobbered the logger; restored as two names.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        # Parameter names restored from the attribute reads in the original
        # body; defaults are kept exactly as in the obfuscated signature.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Load the config, unwrapping the ``text_config`` of a composite Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (image encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Load the config, unwrapping the ``vision_config`` of a composite Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        # Token ids are mirrored from the text config so generation sees them here.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Propagate the composite initializer range down to both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding both sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
72
0
def move_tower(height, from_pole, to_pole, with_pole):
    """Solve Tower of Hanoi: move ``height`` disks from ``from_pole`` to ``to_pole``.

    ``with_pole`` is the spare peg. Prints one line per disk move (2**height - 1
    moves in total). The obfuscated original gave all four parameters the same
    name (a SyntaxError) and defined every function under one name while calling
    ``move_tower``/``move_disk``/``main``; the called names are restored here.
    """
    if height >= 1:
        # Move the top height-1 disks out of the way, move the largest, then
        # stack the height-1 disks back on top of it.
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole, to_pole):
    """Print a single disk move from ``from_pole`` to ``to_pole``."""
    print("moving disk from", from_pole, "to", to_pole)


def main():
    """Read the tower height from stdin and solve the puzzle on poles A, B, C."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
714
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the grads of two models are in sync iff an optimizer step happened.

    The obfuscated original gave all parameters one name (SyntaxError) and its
    body referenced the restored names below; names are reconstructed from those
    references.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""


def step_model(model, input, target, accelerator, do_backward=True):
    """One forward/backward pass with an MSE loss.

    With ``do_backward`` falsy the loss is scaled manually by the accumulation
    step count and backpropagated directly (the ground-truth path); otherwise
    ``accelerator.backward`` is used.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Return a reference model, its prepared DDP copy and a dataloader.

    With ``sched=True`` also returns optimizers and LR schedulers for both.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        # ``sched`` is rebound from the bool flag to the actual scheduler.
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """On a single device ``no_sync`` is a noop, so grads always stay in sync."""
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """Under real DDP, ``no_sync`` must actually defer gradient synchronization."""
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """``accelerator.accumulate`` must only sync grads every 2 steps (or at the end)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Same as above but stepping optimizers/schedulers; LRs must stay aligned."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # Without split batches each process sees its own step.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """GradientState must track whichever prepared dataloader is currently active."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration switches the active dataloader, then restores it.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    """Dispatch the sync/accumulation tests appropriate for the current setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""", """2.0""") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                """`split_batches=False`, `dispatch_batches=False`**""",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
72
0
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` CLI argument (pytest passes the test file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    """End-to-end train/eval smoke tests for the DeeBERT research project."""

    def setup(self) -> None:
        # Mirror log output to stdout so pytest captures it.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run run_glue_deebert.main() with ``args`` as argv and check all metrics >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        # Stage 1: two-stage training on MRPC.
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        # Stage 2: evaluate each highway exit of the trained model.
        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        # Stage 3: evaluate with an early-exit entropy threshold.
        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
715
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


# NOTE(review): the obfuscated original collapsed these three fixtures onto a
# single name; restored as three distinct constants.
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    """Tests for AutoProcessor resolution, dynamic code loading and registration."""

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        # Don't block on the trust-remote-code confirmation prompt.
        # NOTE(review): target reconstructed from the upstream test; the
        # obfuscated original only showed `... = 0` — confirm.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = WavaVecaProcessor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
                config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = WavaVecaProcessor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
                config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_model_config_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(WavaVecaConfig, WavaVecaProcessor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            # Always unregister so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        # Local stand-ins that conflict with the remote dynamic classes.
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    """Staging-Hub round-trip tests for pushing processors."""

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
72
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class A_(unittest.TestCase):
    """Slow integration test for google/mt5-small.

    Checks that the per-token NLL of a fixed (input, label) pair matches the
    reference score computed with the original Mesh-TensorFlow implementation.
    """

    @slow
    def SCREAMING_SNAKE_CASE__(self):
        # Fix: the obfuscated source bound every result to `lowercase` while the
        # following statements read `model`, `tokenizer`, `input_ids`, `labels`,
        # `loss` and `mtf_score` — restore coherent bindings from the usage sites.
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Total (sequence-summed) negative log-likelihood, to compare with the
        # Mesh-TF reference score (which is a sum, not a mean).
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
716
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


# NOTE(review): the obfuscated source named all four test functions
# `UpperCamelCase__` (later defs shadowed earlier ones, and pytest would not
# collect them). Distinct `test_*` names restored from each function's body.


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict can be loaded from a directory, whether the metadata
    lives in the README.md YAML header or in a legacy dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """Round-trip a DatasetInfo through write_to_directory/from_directory."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    """The YAML dict of a fully-populated DatasetInfo contains exactly the
    whitelisted keys and survives a yaml dump/load round-trip."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # only simple, yaml-serializable values are allowed
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """Round-trip a DatasetInfosDict through the README.md YAML header."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
72
0
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class A_(BaseImageProcessor):
    """Image processor that rescales pixel values and symmetrically pads images
    so that height and width are multiples of `pad_size` (as required by
    window-based super-resolution models).

    Fixes applied to the obfuscated source: the three methods were all named
    `SCREAMING_SNAKE_CASE__` (later defs shadowed earlier ones) while
    `preprocess` calls `self.rescale` and `self.pad`; the undefined base class
    `_A` is restored to the imported `BaseImageProcessor`; the class attribute
    is restored to `model_input_names`, which the base class reads.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (thin wrapper over the functional API)."""
        # `rescale` here resolves to the module-level import, not this method.
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetrically pad the bottom/right edges up to the next multiple of `size`.

        Note: when a dimension is already a multiple of `size` this pads a full
        extra block of `size` pixels — kept as-is to preserve behavior.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch: validate, rescale, pad, and convert
        to the requested channel layout / tensor type.

        Raises:
            ValueError: on invalid image types, or when `do_rescale` is set
                without a `rescale_factor`.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
717
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Materialize ("bertarize") a fine-pruned checkpoint: apply each layer's
    binarized mask to its weights and save a standalone state dict.

    Fix applied to the obfuscated source: the entry point was named
    `UpperCamelCase__` while `__main__` calls `main(args)`; all result
    bindings collapsed to `lowercase` were restored from their usage sites.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        # Embeddings, layer norms, poolers, heads and biases are never pruned —
        # copy them through unchanged.
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # `name` ends with "weight" (6 chars); the matching score tensor
                # is stored under "<prefix>mask_scores".
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval from the L0 regularization paper.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    main(args)
72
0
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


# NOTE(review): the obfuscated source named the dataclass and both TestCase
# classes `A_` (later definitions shadowed earlier ones, breaking the asdict
# tests) and collapsed all result bindings to `lowercase`; names below are
# reconstructed from the usage sites.


def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    # simple record used by the asdict tests below
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        """map_nested applies a function through nested lists/dicts, with and
        without multiprocessing, and optionally over numpy leaves."""
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        """zip_dict yields (key, tuple_of_values) across several dicts."""
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        """temporary_assignment swaps an attribute inside the context only."""
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """map_nested only spins up a pool when the input is large enough
    (parallel_min_length) and caps the worker count at the iterable length."""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        # same seed -> same output; leaving the context restores entropy
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    """iflatmap_unordered flattens worker outputs and yields each item as soon
    as it is produced (not after the worker finishes)."""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
718
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal from node `u` that marks each undirected edge as
    visited in both directions, returning the node sequence walked.

    `path` defaults to None (never a mutable default) and is rebuilt as a new
    list on every call, so callers may omit it safely.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if not visited_edge[u][v]:  # idiomatic truthiness test instead of `is False`
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree node count.

    Returns:
        (1, odd_node) - Euler circuit exists (no odd-degree nodes)
        (2, odd_node) - Euler path exists (exactly two odd nodes; `odd_node`
                        is the last odd node seen and a valid start point)
        (3, odd_node) - neither exists
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph:  # membership test on the dict directly, not .keys()
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether `graph` has an Euler cycle/path and, if so, one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # an Euler path must start at an odd-degree node
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Demo: run the Euler check on a handful of small fixed graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
72
0
def nand_gate(input_a: int, input_b: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1.

    Bug fix: the source built the tuple as `(input_a, input_a)`, ignoring the
    second input entirely (so nand(1, 0) wrongly returned 0); it also declared
    two parameters with the same name. Restored to `(input_a, input_b)`.
    """
    return int((input_a, input_b).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustive truth-table check for the two-input NAND gate."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
719
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


# NOTE(review): the obfuscated source named all three classes `A_` (later
# definitions shadowed earlier ones) while `setUp` instantiates
# `FlaxRoFormerModelTester(self)` — names restored from the call sites.


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random (config, input_ids, token_type_ids, attention_mask) tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): restored as `test_head_masking` — the obfuscated attribute
    # name was destroyed; confirm against upstream before relying on it.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # reference logits from the original PyTorch checkpoint
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
0
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid, i.e. C(2n, n)
    (Project Euler problem 15 for n = 20).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    # Fix: use integer floor division. `int(factorial(n) / ...)` performs float
    # true division, which silently loses precision once the factorials exceed
    # float's 53-bit mantissa; `//` keeps the computation exact for any n.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
720
import argparse import hashlib # hashlib is only used inside the Test class import struct class A_ : def __init__( self : List[str] , snake_case__ : Union[str, Any] ): lowercase = data lowercase = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64) lowercase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Tuple ): lowercase = list(struct.unpack(""">16L""" , snake_case__ ) ) + [0] * 64 for i in range(16 , 80 ): lowercase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = self.padding() lowercase = self.split_blocks() for block in self.blocks: lowercase = self.expand_block(snake_case__ ) lowercase , lowercase , lowercase , lowercase , lowercase = self.h for i in range(0 , 80 ): if 0 <= i < 20: lowercase = (b & c) | ((~b) & d) lowercase = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: lowercase = b ^ c ^ d lowercase = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: lowercase = (b & c) | (b & d) | (c & d) lowercase = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: lowercase = b ^ c ^ d lowercase = 0Xc_a_6_2_c_1_d_6 lowercase , lowercase , lowercase , lowercase , lowercase = ( self.rotate(snake_case__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(snake_case__ , 30 ), c, d, ) lowercase = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 
0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def UpperCamelCase__ ( ): lowercase = b"""Test String""" assert SHAaHash(lowerCAmelCase__ ).final_hash() == hashlib.shaa(lowerCAmelCase__ ).hexdigest() # noqa: S324 def UpperCamelCase__ ( ): lowercase = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,) parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" ) lowercase = parser.parse_args() lowercase = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file ,"""rb""" ) as f: lowercase = f.read() else: lowercase = bytes(lowerCAmelCase__ ,"""utf-8""" ) print(SHAaHash(lowerCAmelCase__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
72
0
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def UpperCamelCase__ ( lowerCAmelCase__ ): return ConvertCommand( args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] =''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class A_ ( SCREAMING_SNAKE_CASE__ ): @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Tuple ): lowercase = parser.add_parser( """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , ) train_parser.add_argument("""--model_type""" , type=_lowercase , required=_lowercase , help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" , type=_lowercase , required=_lowercase , help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" , type=_lowercase , required=_lowercase , help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" , type=_lowercase , default="""""" , help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" , type=_lowercase , default=_lowercase , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , ) train_parser.set_defaults(func=_lowercase ) def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Dict , *snake_case__ : List[Any] , ): lowercase = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(F"""Loading model {model_type}""" ) lowercase = model_type lowercase = tf_checkpoint lowercase = pytorch_dump_output lowercase = config lowercase = 
finetuning_task_name def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_lowercase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) if "ckpt" in self._tf_checkpoint.lower(): lowercase = self._tf_checkpoint lowercase = """""" else: lowercase = self._tf_checkpoint lowercase = """""" convert_transfo_xl_checkpoint_to_pytorch( _lowercase , self._config , self._pytorch_dump_output , 
_lowercase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_lowercase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
721
class A_ : def __init__( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ): lowercase = name lowercase = val def __str__( self : str ): return F"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : int , snake_case__ : Optional[int] ): return self.val < other.val class A_ : def __init__( self : str , snake_case__ : List[str] ): lowercase = {} lowercase = {} lowercase = self.build_heap(snake_case__ ) def __getitem__( self : Union[str, Any] , snake_case__ : int ): return self.get_value(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Optional[Any] ): return (idx - 1) // 2 def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Dict ): return idx * 2 + 1 def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Optional[Any] ): return idx * 2 + 2 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : Dict ): return self.heap_dict[key] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Any ): lowercase = len(snake_case__ ) - 1 lowercase = self.get_parent_idx(snake_case__ ) for idx, i in enumerate(snake_case__ ): lowercase = idx lowercase = i.val for i in range(snake_case__ , -1 , -1 ): self.sift_down(snake_case__ , snake_case__ ) return array def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : str ): while True: lowercase = self.get_left_child_idx(snake_case__ ) # noqa: E741 lowercase = self.get_right_child_idx(snake_case__ ) lowercase = idx if l < len(snake_case__ ) and array[l] < array[idx]: lowercase = l if r < len(snake_case__ ) and array[r] < array[smallest]: lowercase = r if smallest != idx: lowercase , lowercase = array[smallest], array[idx] ( ( lowercase ) , ( lowercase ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase = smallest else: break def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[int] ): lowercase = self.get_parent_idx(snake_case__ ) while 
p >= 0 and self.heap[p] > self.heap[idx]: lowercase , lowercase = self.heap[idx], self.heap[p] lowercase , lowercase = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase = p lowercase = self.get_parent_idx(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : int ): return self.heap[0] def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase , lowercase = self.heap[-1], self.heap[0] lowercase , lowercase = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): self.heap.append(snake_case__ ) lowercase = len(self.heap ) - 1 lowercase = node.val self.sift_up(len(self.heap ) - 1 ) def SCREAMING_SNAKE_CASE__ ( self : int ): return len(self.heap ) == 0 def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : int , snake_case__ : Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase = new_value lowercase = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE : Any =Node('''R''', -1) __SCREAMING_SNAKE_CASE : Union[str, Any] =Node('''B''', 6) __SCREAMING_SNAKE_CASE : str =Node('''A''', 3) __SCREAMING_SNAKE_CASE : List[Any] =Node('''X''', 1) __SCREAMING_SNAKE_CASE : str =Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE : Any =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
72
0
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance __SCREAMING_SNAKE_CASE : str =6_378_137.0 __SCREAMING_SNAKE_CASE : Optional[int] =6_356_752.314_245 __SCREAMING_SNAKE_CASE : Optional[Any] =6_378_137 def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude lowercase = atan((1 - flattening) * tan(radians(snake_case__ ) ) ) lowercase = atan((1 - flattening) * tan(radians(snake_case__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius lowercase = haversine_distance(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values lowercase = (b_lata + b_lata) / 2 lowercase = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) lowercase = (sin(snake_case__ ) ** 2) * (cos(snake_case__ ) ** 2) lowercase = cos(sigma / 2 ) ** 2 lowercase = (sigma - sin(snake_case__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) lowercase = (cos(snake_case__ ) ** 2) * (sin(snake_case__ ) ** 2) lowercase = sin(sigma / 2 ) ** 2 lowercase = (sigma + sin(snake_case__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
700
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig''' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat''' __SCREAMING_SNAKE_CASE : Tuple =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ): if drop_prob == 0.0 or not training: return input lowercase = 1 - drop_prob lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize lowercase = input.div(lowerCAmelCase__ ) * random_tensor return output class A_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ): super().__init__() lowercase = drop_prob def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ): return drop_path(snake_case__ , self.drop_prob , self.training ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return "p={}".format(self.drop_prob ) class A_ ( nn.Module ): def 
__init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ): super().__init__() lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = self.projection(snake_case__ ) lowercase = self.norm(snake_case__ ) return embeddings class A_ ( nn.GroupNorm ): def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ): super().__init__(1 , snake_case__ , **snake_case__ ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any ): super().__init__() lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): return self.pool(snake_case__ ) - hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ): super().__init__() lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase = ACTaFN[config.hidden_act] else: lowercase = config.hidden_act def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ): lowercase = self.conva(snake_case__ ) lowercase = self.act_fn(snake_case__ ) lowercase = self.drop(snake_case__ ) 
lowercase = self.conva(snake_case__ ) lowercase = self.drop(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ): super().__init__() lowercase = PoolFormerPooling(snake_case__ ) lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase = config.use_layer_scale if config.use_layer_scale: lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): if self.use_layer_scale: lowercase = self.pooling(self.before_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = () lowercase = self.output(self.after_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = (output,) + outputs return outputs else: lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase = pooling_output + hidden_states lowercase = () # Second residual connection inside the PoolFormerOutput block lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase = hidden_states + layer_output lowercase = (output,) + outputs return outputs class A_ ( nn.Module ): def 
__init__( self : List[str] , snake_case__ : Optional[Any] ): super().__init__() lowercase = config # stochastic depth decay rule lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase = nn.ModuleList(snake_case__ ) # Transformer blocks lowercase = [] lowercase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) lowercase = nn.ModuleList(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ): lowercase = () if output_hidden_states else None lowercase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase , lowercase = layers # Get patch embeddings from hidden_states lowercase = embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase = blk(snake_case__ ) lowercase = layer_outputs[0] if output_hidden_states: lowercase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class A_ ( __a ): _A :Any = 
PoolFormerConfig _A :int = '''poolformer''' _A :Union[str, Any] = '''pixel_values''' _A :str = True def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Optional[Any] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , ) class A_ ( __a ): def __init__( self : Union[str, Any] , snake_case__ : int ): super().__init__(snake_case__ ) lowercase = config lowercase = PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) lowercase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[int] ): super().__init__() lowercase = nn.Linear(config.hidden_size , config.hidden_size ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.dense(snake_case__ ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __a , ) class A_ ( __a ): def __init__( self : Dict , snake_case__ : Any ): 
super().__init__(snake_case__ ) lowercase = config.num_labels lowercase = PoolFormerModel(snake_case__ ) # Final norm lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = outputs[0] lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase = """single_label_classification""" else: lowercase = """multi_label_classification""" if self.config.problem_type == "regression": lowercase = MSELoss() if self.num_labels == 1: lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase = CrossEntropyLoss() lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase = BCEWithLogitsLoss() lowercase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase = 
(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
72
0
# Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __SCREAMING_SNAKE_CASE : Optional[int] =re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class A_ : _A :str _A :Optional[str] = None _A :Optional[Union[str, int]] = None _A :Optional[Union[str, int]] = None _A :Optional[Union[str, int]] = None def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase , lowercase , lowercase = _str_to_version_tuple(self.version_str ) def __repr__( self : Tuple ): return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return self.major, self.minor, self.patch def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Dict ): if isinstance(snake_case__ , snake_case__ ): return Version(snake_case__ ) elif isinstance(snake_case__ , snake_case__ ): return other raise TypeError(F"""{other} (type {type(snake_case__ )}) cannot be compared to version.""" ) def __eq__( self : Tuple , snake_case__ : List[Any] ): try: lowercase = self._validate_operand(snake_case__ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] , snake_case__ : Dict ): lowercase = self._validate_operand(snake_case__ ) return self.tuple < other.tuple def __hash__( self : Tuple ): return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any , snake_case__ : Optional[Any] ): lowercase = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def SCREAMING_SNAKE_CASE__ ( self : Any ): return self.version_str def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = _VERSION_REG.match(__SCREAMING_SNAKE_CASE ) if not res: raise ValueError(f"""Invalid version \'{version_str}\'. 
Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(__SCREAMING_SNAKE_CASE ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def UpperCamelCase__ ( lowerCAmelCase__ ): return ".".join(str(__SCREAMING_SNAKE_CASE ) for v in version_tuple )
701
from numpy import exp, pi, sqrt def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
72
0
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __SCREAMING_SNAKE_CASE : Any =[ '''openmmlab/upernet-convnext-tiny''', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __SCREAMING_SNAKE_CASE : Optional[int] ='''UperNetConfig''' class A_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Tuple = 0 , snake_case__ : int = False , snake_case__ : int = 1 , ): super().__init__() lowercase = nn.Convad( in_channels=snake_case__ , out_channels=snake_case__ , kernel_size=snake_case__ , padding=snake_case__ , bias=snake_case__ , dilation=snake_case__ , ) lowercase = nn.BatchNormad(snake_case__ ) lowercase = nn.ReLU() def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : int ): lowercase = self.conv(snake_case__ ) lowercase = self.batch_norm(snake_case__ ) lowercase = self.activation(snake_case__ ) return output class A_ ( nn.Module ): def __init__( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ): super().__init__() lowercase = [ nn.AdaptiveAvgPoolad(snake_case__ ), UperNetConvModule(snake_case__ , snake_case__ , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(snake_case__ ) , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = input for layer in self.layers: lowercase = layer(snake_case__ ) return hidden_state class A_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : 
Optional[Any] , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Tuple ): super().__init__() lowercase = pool_scales lowercase = align_corners lowercase = in_channels lowercase = channels lowercase = [] for i, pool_scale in enumerate(snake_case__ ): lowercase = UperNetPyramidPoolingBlock(pool_scale=snake_case__ , in_channels=snake_case__ , channels=snake_case__ ) self.blocks.append(snake_case__ ) self.add_module(str(snake_case__ ) , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Dict ): lowercase = [] for ppm in self.blocks: lowercase = ppm(snake_case__ ) lowercase = nn.functional.interpolate( snake_case__ , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners ) ppm_outs.append(snake_case__ ) return ppm_outs class A_ ( nn.Module ): def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Dict ): super().__init__() lowercase = config lowercase = config.pool_scales # e.g. (1, 2, 3, 6) lowercase = in_channels lowercase = config.hidden_size lowercase = False lowercase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module lowercase = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) lowercase = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module lowercase = nn.ModuleList() lowercase = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowercase = UperNetConvModule(snake_case__ , self.channels , kernel_size=1 ) lowercase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(snake_case__ ) self.fpn_convs.append(snake_case__ ) lowercase = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): self.apply(self._init_weights ) def 
SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[Any] ): if isinstance(snake_case__ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Dict ): lowercase = inputs[-1] lowercase = [x] psp_outs.extend(self.psp_modules(snake_case__ ) ) lowercase = torch.cat(snake_case__ , dim=1 ) lowercase = self.bottleneck(snake_case__ ) return output def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : List[str] ): # build laterals lowercase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(snake_case__ ) ) # build top-down path lowercase = len(snake_case__ ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowercase = laterals[i - 1].shape[2:] lowercase = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=snake_case__ , mode="""bilinear""" , align_corners=self.align_corners ) # build outputs lowercase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowercase = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners ) lowercase = torch.cat(snake_case__ , dim=1 ) lowercase = self.fpn_bottleneck(snake_case__ ) lowercase = self.classifier(snake_case__ ) return output class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple = 2 , snake_case__ : Optional[int] = 3 , snake_case__ : str = 1 ): super().__init__() lowercase = config lowercase = config.auxiliary_in_channels lowercase = config.auxiliary_channels lowercase = config.auxiliary_num_convs lowercase = config.auxiliary_concat_input lowercase = in_index lowercase = (kernel_size // 2) * dilation lowercase = [] 
convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=snake_case__ , padding=snake_case__ , dilation=snake_case__ ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=snake_case__ , padding=snake_case__ , dilation=snake_case__ ) ) if self.num_convs == 0: lowercase = nn.Identity() else: lowercase = nn.Sequential(*snake_case__ ) if self.concat_input: lowercase = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=snake_case__ , padding=kernel_size // 2 ) lowercase = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): self.apply(self._init_weights ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Optional[Any] ): if isinstance(snake_case__ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : str ): # just take the relevant feature maps lowercase = encoder_hidden_states[self.in_index] lowercase = self.convs(snake_case__ ) if self.concat_input: lowercase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) lowercase = self.classifier(snake_case__ ) return output class A_ ( __a ): _A :List[str] = UperNetConfig _A :List[str] = '''pixel_values''' _A :Any = True def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[int] ): if isinstance(snake_case__ , snake_case__ ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def SCREAMING_SNAKE_CASE__ ( self : Dict ): self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : str=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Dict =R''' Parameters: This 
model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''' , __a , ) class A_ ( __a ): def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] ): super().__init__(snake_case__ ) lowercase = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowercase = UperNetHead(snake_case__ , in_channels=self.backbone.channels ) lowercase = UperNetFCNHead(snake_case__ ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) ) @replace_return_docstrings(output_type=snake_case__ , config_class=_CONFIG_FOR_DOC ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] = None , snake_case__ : Tuple = None , snake_case__ : Tuple = None , snake_case__ : Union[str, Any] = None , snake_case__ : Union[str, Any] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = output_attentions if output_attentions is not None else self.config.output_attentions lowercase = self.backbone.forward_with_filtered_kwargs( snake_case__ , output_hidden_states=snake_case__ , output_attentions=snake_case__ ) lowercase = outputs.feature_maps lowercase = self.decode_head(snake_case__ ) lowercase = nn.functional.interpolate(snake_case__ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=snake_case__ ) lowercase = None if self.auxiliary_head is not None: lowercase = self.auxiliary_head(snake_case__ ) lowercase = nn.functional.interpolate( snake_case__ , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=snake_case__ ) lowercase = None if labels is not None: if self.config.num_labels == 1: raise ValueError("""The number of labels should be greater than one""" ) else: # compute weighted loss lowercase = 
CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowercase = loss_fct(snake_case__ , snake_case__ ) lowercase = loss_fct(snake_case__ , snake_case__ ) lowercase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowercase = (logits,) + outputs[1:] else: lowercase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
702
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge with a 0/1 weight, for 0-1 BFS."""

    destination_vertex: int
    weight: int


class A_:
    """Adjacency-list digraph whose edge weights are restricted to 0 or 1.

    Shortest paths are computed with 0-1 BFS (deque-based Dijkstra variant)
    in O(V + E).
    """

    def __init__(self, size: int):
        # One adjacency list per vertex, indexed by vertex number.
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; ``weight`` must be 0 or 1."""
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """Return the 0-1 BFS shortest distance from start to finish.

        Raises:
            ValueError: if ``finish_vertex`` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list = [None] * self.size  # None == not yet reached
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Fix: the original compared with isinstance(x, x); the intent
                # is "already reached at least as cheaply -> skip".
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges keep the distance, so they go to the front
                # of the deque; 1-weight edges go to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class A_ : '''simple docstring''' def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[int]=99 , snake_case__ : Any=13 , snake_case__ : Dict=7 , snake_case__ : Tuple=9 , snake_case__ : int=True , snake_case__ : Dict=True , snake_case__ : Tuple=False , snake_case__ : Dict=32 , snake_case__ : List[Any]=5 , snake_case__ : int=4 , snake_case__ : Tuple=37 , snake_case__ : Tuple=8 , snake_case__ : int=0.1 , snake_case__ : int=0.002 , snake_case__ : int=1 , snake_case__ : Tuple=0 , snake_case__ : Dict=0 , snake_case__ : List[Any]=None , snake_case__ : Optional[int]=None , ): lowercase = parent lowercase = batch_size lowercase = encoder_seq_length lowercase = decoder_seq_length # For common tests lowercase = self.decoder_seq_length lowercase = is_training lowercase = use_attention_mask lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = d_ff lowercase = relative_attention_num_buckets lowercase = dropout_rate lowercase = initializer_factor lowercase = eos_token_id lowercase = pad_token_id lowercase = decoder_start_token_id lowercase = None lowercase = decoder_layers def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): return TaConfig.from_pretrained("""google/umt5-base""" ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : int=None , snake_case__ : 
Any=None , snake_case__ : List[Any]=None , snake_case__ : Any=None , snake_case__ : Any=None , ): if attention_mask is None: lowercase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowercase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowercase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__A ) if decoder_head_mask is None: lowercase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__A ) if cross_attn_head_mask is None: lowercase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__A ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowercase = input_ids.clamp(self.pad_token_id + 1 ) lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowercase = self.get_config() lowercase = config.num_attention_heads lowercase = self.prepare_inputs_dict(__A , __A , __A ) return config, input_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = self.prepare_config_and_inputs() return config, inputs_dict def 
SCREAMING_SNAKE_CASE__ ( self : str ): return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] , ): lowercase = UMTaModel(config=__A ) model.to(__A ) model.eval() lowercase = model( input_ids=__A , decoder_input_ids=__A , attention_mask=__A , decoder_attention_mask=__A , ) lowercase = model(input_ids=__A , decoder_input_ids=__A ) lowercase = result.last_hidden_state lowercase = result.past_key_values lowercase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` 
key value embeddings stored in decoder_past self.parent.assertEqual(len(__A ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , ): lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval() # first forward pass lowercase = model(__A , use_cache=__A ) lowercase = model(__A ) lowercase = model(__A , use_cache=__A ) self.parent.assertTrue(len(__A ) == len(__A ) ) self.parent.assertTrue(len(__A ) == len(__A ) + 1 ) lowercase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and lowercase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase = model(__A )["last_hidden_state"] lowercase = model(__A , past_key_values=__A )["last_hidden_state"] # select random slice lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase = output_from_no_past[:, -1, random_slice_idx].detach() lowercase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Optional[int] , snake_case__ : List[str] , ): lowercase = UMTaModel(config=__A ).to(__A ).half().eval() lowercase = model(**__A )["last_hidden_state"] self.parent.assertFalse(torch.isnan(__A ).any().item() ) @require_torch class A_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' _A :Any = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _A :str = (UMTaForConditionalGeneration,) if 
is_torch_available() else () _A :int = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) _A :int = True _A :Union[str, Any] = False _A :Optional[int] = False _A :Tuple = True _A :int = True # The small UMT5 model needs higher percentages for CPU/MP tests _A :Optional[Any] = [0.8, 0.9] def SCREAMING_SNAKE_CASE__ ( self : int ): lowercase = UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowercase = self.model_tester.prepare_config_and_inputs() lowercase = UMTaModel(config_and_inputs[0] ).to(__A ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=__A , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__A ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = ["encoder_attentions", "decoder_attentions", "cross_attentions"] lowercase = self.model_tester.prepare_config_and_inputs() lowercase = config_and_inputs[0] lowercase = UMTaForConditionalGeneration(__A ).eval() model.to(__A ) lowercase = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=__A ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ), } for attn_name, 
(name, mask) in zip(__A , head_masking.items() ): lowercase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": lowercase = torch.ones( config.num_decoder_layers , config.num_heads , device=__A ) lowercase = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__A , return_dict_in_generate=__A , **__A , ) # We check the state of decoder_attentions and cross_attentions just from the last step lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): pass @require_torch @require_sentencepiece @require_tokenizers class A_ ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowercase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__A ).to(__A ) lowercase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__A , legacy=__A ) lowercase = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] lowercase = tokenizer(__A , return_tensors="""pt""" , padding=__A ).input_ids # fmt: off lowercase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(__A , __A ) lowercase = model.generate(input_ids.to(__A ) ) lowercase = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] lowercase = tokenizer.batch_decode(__A ) self.assertEqual(__A , __A )
703
import math from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : str =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str ={ '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class A_ ( __a ): _A :Tuple = '''data2vec-audio''' def __init__( self : Optional[Any] , snake_case__ : List[Any]=32 , snake_case__ : List[Any]=7_68 , snake_case__ : int=12 , snake_case__ : Dict=12 , snake_case__ : List[str]=30_72 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Tuple=0.0 , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-5 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : str=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : Any=False , snake_case__ : List[str]=16 , snake_case__ : Any=19 , snake_case__ : Optional[Any]=5 , snake_case__ : str=0.05 , snake_case__ : Tuple=10 , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=0.0 , snake_case__ : int=10 , snake_case__ : Any=0 , snake_case__ : int="sum" , snake_case__ : str=False , snake_case__ : str=False , snake_case__ : Optional[int]=2_56 , snake_case__ : List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , snake_case__ : List[str]=(5, 3, 3, 1, 1) , snake_case__ : int=(1, 2, 3, 1, 1) , snake_case__ : Optional[Any]=5_12 , snake_case__ : Dict=0 , snake_case__ : Optional[Any]=1 , snake_case__ : Tuple=2 , snake_case__ : Tuple=False , snake_case__ : List[str]=3 , snake_case__ : List[str]=2 , snake_case__ : Tuple=3 , snake_case__ : List[str]=None , **snake_case__ : str , ): super().__init__(**snake_case__ , pad_token_id=snake_case__ , 
bos_token_id=snake_case__ , eos_token_id=snake_case__ ) lowercase = hidden_size lowercase = feat_extract_activation lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = conv_bias lowercase = num_conv_pos_embeddings lowercase = num_conv_pos_embedding_groups lowercase = conv_pos_kernel_size lowercase = len(self.conv_dim ) lowercase = num_hidden_layers lowercase = intermediate_size lowercase = hidden_act lowercase = num_attention_heads lowercase = hidden_dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = feat_proj_dropout lowercase = final_dropout lowercase = layerdrop lowercase = layer_norm_eps lowercase = initializer_range lowercase = vocab_size lowercase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase = mask_time_prob lowercase = mask_time_length lowercase = mask_time_min_masks lowercase = mask_feature_prob lowercase = mask_feature_length lowercase = mask_feature_min_masks # ctc loss lowercase = ctc_loss_reduction lowercase = ctc_zero_infinity # adapter lowercase = add_adapter lowercase = adapter_kernel_size lowercase = adapter_stride lowercase = num_adapter_layers lowercase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase = classifier_proj_size # XVector-specific parameters. 
Feel free to ignore for other classes. lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = list(snake_case__ ) lowercase = xvector_output_dim @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return math.prod(self.conv_stride )
72
0
import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home __SCREAMING_SNAKE_CASE : Optional[Any] =HUGGINGFACE_HUB_CACHE __SCREAMING_SNAKE_CASE : Tuple ="config.json" __SCREAMING_SNAKE_CASE : Optional[Any] ="diffusion_pytorch_model.bin" __SCREAMING_SNAKE_CASE : int ="diffusion_flax_model.msgpack" __SCREAMING_SNAKE_CASE : Dict ="model.onnx" __SCREAMING_SNAKE_CASE : Optional[int] ="diffusion_pytorch_model.safetensors" __SCREAMING_SNAKE_CASE : Union[str, Any] ="weights.pb" __SCREAMING_SNAKE_CASE : Any ="https://huggingface.co" __SCREAMING_SNAKE_CASE : List[str] =default_cache_path __SCREAMING_SNAKE_CASE : Union[str, Any] ="diffusers_modules" __SCREAMING_SNAKE_CASE : Optional[int] =os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules''')) __SCREAMING_SNAKE_CASE : Optional[Any] =["fp16", "non-ema"] __SCREAMING_SNAKE_CASE : List[Any] =".self_attn"
704
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [ """decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase , lowercase = emb.weight.shape lowercase = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ ) lowercase = emb.weight.data return lin_layer def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = torch.load(lowerCAmelCase__ ,map_location="""cpu""" ) lowercase = Namespace(**checkpoint["""cfg"""]["""model"""] ) lowercase = checkpoint["""model"""] remove_ignore_keys_(lowerCAmelCase__ ) lowercase = state_dict["""decoder.embed_tokens.weight"""].shape[0] lowercase = {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()} lowercase = XGLMConfig( vocab_size=lowerCAmelCase__ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,) lowercase = XGLMForCausalLM(lowerCAmelCase__ ) lowercase = model.load_state_dict(lowerCAmelCase__ ,strict=lowerCAmelCase__ ) print(lowerCAmelCase__ ) lowercase = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') 
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') __SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args() __SCREAMING_SNAKE_CASE : Optional[int] =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
def UpperCamelCase__ ( ): for n in range(1 ,1_000_000 ): yield n * (n + 1) // 2 def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = 1 lowercase = 2 while i * i <= n: lowercase = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def UpperCamelCase__ ( ): return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase__ ) > 500 ) if __name__ == "__main__": print(solution())
705
from __future__ import annotations import bisect def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_right(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left <= right: lowercase = left + (right - left) // 2 lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase = midpoint - 1 else: lowercase = midpoint + 1 return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = bisect.bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ) if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if right < left: return None lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ 
,lowerCAmelCase__ ,midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint + 1 ,lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =input('''Enter numbers separated by comma:\n''').strip() __SCREAMING_SNAKE_CASE : Tuple =sorted(int(item) for item in user_input.split(''',''')) __SCREAMING_SNAKE_CASE : Tuple =int(input('''Enter a single number to be found in the list:\n''')) __SCREAMING_SNAKE_CASE : Union[str, Any] =binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
72
0
import math import random from typing import Any from .hill_climbing import SearchProblem def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = True ,lowerCAmelCase__ = math.inf ,lowerCAmelCase__ = -math.inf ,lowerCAmelCase__ = math.inf ,lowerCAmelCase__ = -math.inf ,lowerCAmelCase__ = False ,lowerCAmelCase__ = 100 ,lowerCAmelCase__ = 0.01 ,lowerCAmelCase__ = 1 ,): lowercase = False lowercase = search_prob lowercase = start_temperate lowercase = [] lowercase = 0 lowercase = None while not search_end: lowercase = current_state.score() if best_state is None or current_score > best_state.score(): lowercase = current_state scores.append(snake_case__ ) iterations += 1 lowercase = None lowercase = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to lowercase = random.randint(0 ,len(snake_case__ ) - 1 ) # picking a random neighbor lowercase = neighbors.pop(snake_case__ ) lowercase = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: lowercase = change * -1 # in case we are finding minimum if change > 0: # improves the solution lowercase = picked_neighbor else: lowercase = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability lowercase = picked_neighbor lowercase = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor lowercase = True else: lowercase = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(snake_case__ ) ,snake_case__ ) plt.xlabel("""Iterations""" ) plt.ylabel("""Function values""" ) plt.show() return best_state if __name__ == "__main__": def UpperCamelCase__ ( lowerCAmelCase__ 
,lowerCAmelCase__ ): return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) __SCREAMING_SNAKE_CASE : List[str] =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) __SCREAMING_SNAKE_CASE : Any =simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) __SCREAMING_SNAKE_CASE : Tuple =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) __SCREAMING_SNAKE_CASE : Dict =simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): return (3 * x**2) - (6 * y) __SCREAMING_SNAKE_CASE : Optional[int] =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __SCREAMING_SNAKE_CASE : int =simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' f'''{local_min.score()}''' ) __SCREAMING_SNAKE_CASE : Any =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __SCREAMING_SNAKE_CASE : Optional[Any] =simulated_annealing(prob, find_max=True, visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' f'''{local_min.score()}''' )
706
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = multiprocessing.Manager() lowercase = manager.list() lowercase = multiprocessing.Process(target=lowerCAmelCase__ ,args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowercase = shutil.rmtree lowercase = os.rmdir lowercase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowercase = {} with swallow_io(): with time_limit(lowerCAmelCase__ ): exec(lowerCAmelCase__ ,lowerCAmelCase__ ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. 
lowercase = rmtree lowercase = rmdir lowercase = chdir @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): def signal_handler(lowerCAmelCase__ ,lowerCAmelCase__ ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL ,lowerCAmelCase__ ) signal.signal(signal.SIGALRM ,lowerCAmelCase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL ,0 ) @contextlib.contextmanager def UpperCamelCase__ ( ): lowercase = WriteOnlyStringIO() with contextlib.redirect_stdout(lowerCAmelCase__ ): with contextlib.redirect_stderr(lowerCAmelCase__ ): with redirect_stdin(lowerCAmelCase__ ): yield @contextlib.contextmanager def UpperCamelCase__ ( ): with tempfile.TemporaryDirectory() as dirname: with chdir(lowerCAmelCase__ ): yield dirname class A_ ( __a ): pass class A_ ( io.StringIO ): def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : int , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : Any ): return False class A_ ( contextlib._RedirectStream ): # type: ignore _A :List[Any] = '''stdin''' @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): if root == ".": yield return lowercase = os.getcwd() os.chdir(lowerCAmelCase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__=None ): if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, 
maximum_memory_bytes) ) faulthandler.disable() import builtins lowercase = None lowercase = None import os lowercase = """1""" lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None import shutil lowercase = None lowercase = None lowercase = None import subprocess lowercase = None # type: ignore lowercase = None import sys lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None
72
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE : Optional[int] ={ '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int =[ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __SCREAMING_SNAKE_CASE : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
707
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( __a ): _A :Optional[int] = ['''image_processor''', '''tokenizer'''] _A :Tuple = '''BlipImageProcessor''' _A :List[Any] = '''AutoTokenizer''' def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Dict ): lowercase = False super().__init__(snake_case__ , snake_case__ ) lowercase = self.image_processor def __call__( self : List[str] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowercase = self.tokenizer lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , 
) return text_encoding # add pixel_values lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : int , **snake_case__ : int ): return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
72
0
from collections.abc import Callable import numpy as np def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): '''simple docstring''' lowercase = int(np.ceil((x_end - xa) / step_size ) ) lowercase = np.zeros((n + 1,) ) lowercase = ya lowercase = xa for k in range(UpperCAmelCase__ ): lowercase = y[k] + step_size * ode_func(UpperCAmelCase__ ,y[k] ) lowercase = y[k] + ( (step_size / 2) * (ode_func(UpperCAmelCase__ ,y[k] ) + ode_func(x + step_size ,UpperCAmelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
708
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', 
'''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) __SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def UpperCamelCase__ ( lowerCAmelCase__ ): for module_name, extractors in 
FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: lowercase = model_type_to_module_name(lowerCAmelCase__ ) lowercase = importlib.import_module(f""".{module_name}""" ,"""transformers.models""" ) try: return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(lowerCAmelCase__ ,"""__name__""" ,lowerCAmelCase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowercase = importlib.import_module("""transformers""" ) if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ): return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,**lowerCAmelCase__ ,): lowercase = get_file_from_repo( lowerCAmelCase__ ,lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,use_auth_token=lowerCAmelCase__ ,revision=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(lowerCAmelCase__ ,encoding="""utf-8""" ) as reader: return json.load(lowerCAmelCase__ ) class A_ : def __init__( self : List[Any] ): raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( cls : Dict , snake_case__ : Tuple , **snake_case__ : int ): 
lowercase = kwargs.pop("""config""" , snake_case__ ) lowercase = kwargs.pop("""trust_remote_code""" , snake_case__ ) lowercase = True lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ ) lowercase = config_dict.get("""feature_extractor_type""" , snake_case__ ) lowercase = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(snake_case__ , snake_case__ ): lowercase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ ) # It could be in `config.feature_extractor_type`` lowercase = getattr(snake_case__ , """feature_extractor_type""" , snake_case__ ) if hasattr(snake_case__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: lowercase = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: lowercase = feature_extractor_class_from_name(snake_case__ ) lowercase = feature_extractor_auto_map is not None lowercase = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING lowercase = resolve_trust_remote_code( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if has_remote_code and trust_remote_code: lowercase = get_class_from_dynamic_module( snake_case__ , snake_case__ , **snake_case__ ) lowercase = kwargs.pop("""code_revision""" , snake_case__ ) if os.path.isdir(snake_case__ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING: lowercase = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )] return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Optional[int] , snake_case__ : List[str] ): FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
72
0
import random def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [], [], [] for element in data: if element < pivot: less.append(_lowercase ) elif element > pivot: greater.append(_lowercase ) else: equal.append(_lowercase ) return less, equal, greater def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): if index >= len(_lowercase ) or index < 0: return None lowercase = items[random.randint(0 ,len(_lowercase ) - 1 )] lowercase = 0 lowercase = _partition(_lowercase ,_lowercase ) lowercase = len(_lowercase ) lowercase = len(_lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_lowercase ,_lowercase ) # must be in larger else: return quick_select(_lowercase ,index - (m + count) )
709
# Conversion script: fairseq SpeechT5 checkpoint -> Hugging Face Transformers.
# NOTE(review): this file has been machine-obfuscated — every constant is
# rebound to the same name `__SCREAMING_SNAKE_CASE`, every local to `lowercase`,
# and function parameters are all named `lowerCAmelCase__` while bodies use the
# original names (key, value, name, config, ...). The code is NOT runnable as
# written; comments below record the apparent original identifiers.
import argparse

import torch

from transformers import (
    SpeechTaConfig,
    SpeechTaFeatureExtractor,
    SpeechTaForSpeechToSpeech,
    SpeechTaForSpeechToText,
    SpeechTaForTextToSpeech,
    SpeechTaProcessor,
    SpeechTaTokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
# originally: logger
__SCREAMING_SNAKE_CASE : Any = logging.get_logger('''transformers.models.speecht5''')

# originally: MAPPING_SPEECH_ENCODER_PRENET — fairseq key -> HF key
__SCREAMING_SNAKE_CASE : Optional[Any] = {
    '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
    '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
    '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
    '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
# originally: MAPPING_TEXT_ENCODER_PRENET
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
    '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
# originally: MAPPING_SPEECH_DECODER_PRENET
__SCREAMING_SNAKE_CASE : Optional[int] = {
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
    '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
    '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
    '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
# originally: MAPPING_SPEECH_DECODER_POSTNET
__SCREAMING_SNAKE_CASE : List[Any] = {
    '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
    '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
    '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
    '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
    '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
    '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
    '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
    '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
# originally: MAPPING_TEXT_DECODER_PRENET
__SCREAMING_SNAKE_CASE : List[Any] = {
    '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
# originally: MAPPING_TEXT_DECODER_POSTNET
__SCREAMING_SNAKE_CASE : Optional[Any] = {
    '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
# originally: MAPPING_ENCODER — shared transformer encoder layers ('*' = layer index)
__SCREAMING_SNAKE_CASE : Optional[int] = {
    '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
    '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
    '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
    '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
    '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
    '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
    '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
    '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
    '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
# originally: MAPPING_DECODER
__SCREAMING_SNAKE_CASE : List[Any] = {
    '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
    '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
    '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
    '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
    '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
    '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
    '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
    '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
    '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
    '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
    '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
    '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
    '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
# originally: MAPPING_S2T (speech-to-text) — merged from the per-component maps;
# NOTE(review): the referenced MAPPING_* names no longer exist after obfuscation.
__SCREAMING_SNAKE_CASE : List[Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
# originally: MAPPING_T2S (text-to-speech)
__SCREAMING_SNAKE_CASE : List[str] = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# originally: MAPPING_S2S (speech-to-speech)
__SCREAMING_SNAKE_CASE : Optional[int] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# originally: TOP_LEVEL_KEYS
__SCREAMING_SNAKE_CASE : Dict = []
# originally: IGNORE_KEYS — fairseq keys dropped for every task
__SCREAMING_SNAKE_CASE : List[str] = [
    '''encoder.version''',
    '''encoder.layers.*.norm_k.weight''',
    '''encoder.layers.*.norm_k.bias''',
    '''decoder.version''',
    '''decoder.layers.*.norm_k.weight''',
    '''decoder.layers.*.norm_k.bias''',
    '''decoder.pos_emb.pe_k''',
    '''speech_encoder_prenet.embed_positions._float_tensor''',
    '''text_decoder_prenet.embed_positions._float_tensor''',
]
# originally: IGNORE_KEYS_S2T
__SCREAMING_SNAKE_CASE : List[str] = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''speech_decoder_prenet.*''',
    '''speech_decoder_postnet.*''',
]
# originally: IGNORE_KEYS_T2S
__SCREAMING_SNAKE_CASE : Any = IGNORE_KEYS + [
    '''encoder.proj''',
    '''speech_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
# originally: IGNORE_KEYS_S2S
__SCREAMING_SNAKE_CASE : Any = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]


# originally: set_recursively(hf_pointer, key, value, full_name, weight_type)
# Walks `key` dotted-path into the HF model, checks the shape, assigns `value`.
# NOTE(review): duplicate parameter names below are an obfuscation artifact —
# not valid Python as written.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    # follow the dotted attribute path to the target parameter
    for attribute in key.split(""".""" ):
        lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
    if weight_type is not None:
        lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ ).shape
    else:
        lowercase = hf_pointer.shape
    # shape sanity check before copying fairseq weights into the HF module
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    # dispatch on which tensor attribute of the module gets the data
    if weight_type == "weight":
        lowercase = value
    elif weight_type == "weight_g":
        lowercase = value
    elif weight_type == "weight_v":
        lowercase = value
    elif weight_type == "bias":
        lowercase = value
    elif weight_type == "running_mean":
        lowercase = value
    elif weight_type == "running_var":
        lowercase = value
    elif weight_type == "num_batches_tracked":
        lowercase = value
    else:
        lowercase = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )


# originally: should_ignore(name, ignore_keys) — True if a fairseq key matches
# any ignore pattern ('prefix.*', 'prefix.*.suffix', or plain substring).
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            lowercase , lowercase = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


# originally: recursively_load_weights(fairseq_dict, hf_model, task)
# Copies every fairseq tensor into the matching HF parameter via the task's
# MAPPING table; collects keys that matched nothing.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    lowercase = []
    # pick the key mapping + ignore list for the requested task
    if task == "s2t":
        lowercase = hf_model.speechta.encoder.prenet.feature_encoder
        lowercase = MAPPING_S2T
        lowercase = IGNORE_KEYS_S2T
    elif task == "t2s":
        lowercase = None
        lowercase = MAPPING_T2S
        lowercase = IGNORE_KEYS_T2S
    elif task == "s2s":
        lowercase = hf_model.speechta.encoder.prenet.feature_encoder
        lowercase = MAPPING_S2S
        lowercase = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(lowerCAmelCase__ ,lowerCAmelCase__ ):
            logger.info(f"""{name} was ignored""" )
            continue
        lowercase = False
        if "conv_layers" in name:
            # feature-extractor conv weights are handled by a dedicated loader
            load_conv_layer(
                lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,hf_model.config.feat_extract_norm == """group""" ,)
            lowercase = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    lowercase , lowercase = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        lowercase = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    lowercase = True
                    if "*" in mapped_key:
                        # substitute the concrete layer index for '*'
                        lowercase = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2]
                        lowercase = mapped_key.replace("""*""" ,lowerCAmelCase__ )
                    # classify which tensor attribute this key refers to
                    if "weight_g" in name:
                        lowercase = """weight_g"""
                    elif "weight_v" in name:
                        lowercase = """weight_v"""
                    elif "bias" in name:
                        lowercase = """bias"""
                    elif "weight" in name:
                        lowercase = """weight"""
                    elif "running_mean" in name:
                        lowercase = """running_mean"""
                    elif "running_var" in name:
                        lowercase = """running_var"""
                    elif "num_batches_tracked" in name:
                        lowercase = """num_batches_tracked"""
                    else:
                        lowercase = None
                    set_recursively(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
                continue
            if not is_used:
                unused_weights.append(lowerCAmelCase__ )
    logger.warning(f"""Unused weights: {unused_weights}""" )


# originally: load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm)
# Loads one conv / layer-norm tensor of the speech feature encoder.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    lowercase = full_name.split("""conv_layers.""" )[-1]
    lowercase = name.split(""".""" )
    lowercase = int(items[0] )
    lowercase = int(items[1] )
    # type_id 0 -> conv weight/bias; type_id 2 -> (group/layer) norm weight/bias
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowerCAmelCase__ )


# originally: convert_speechta_checkpoint(task, checkpoint_path,
#   pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None)
# Builds the HF model/processor for `task`, loads fairseq weights, saves, and
# optionally pushes to the hub.
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,):
    if config_path is not None:
        lowercase = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
    else:
        lowercase = SpeechTaConfig()
    if task == "s2t":
        lowercase = config.max_text_positions
        lowercase = SpeechTaForSpeechToText(lowerCAmelCase__ )
    elif task == "t2s":
        # t2s/s2s fairseq checkpoints use a 1876-token vocab; 600 text positions
        lowercase = 1_876
        lowercase = 600
        lowercase = config.max_speech_positions
        lowercase = SpeechTaForTextToSpeech(lowerCAmelCase__ )
    elif task == "s2s":
        lowercase = 1_876
        lowercase = config.max_speech_positions
        lowercase = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        lowercase = SpeechTaTokenizer(lowerCAmelCase__ ,model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        lowercase = AddedToken("""<mask>""" ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ )
        lowercase = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
    lowercase = SpeechTaFeatureExtractor()
    lowercase = SpeechTaProcessor(tokenizer=lowerCAmelCase__ ,feature_extractor=lowerCAmelCase__ )
    processor.save_pretrained(lowerCAmelCase__ )
    lowercase = torch.load(lowerCAmelCase__ )
    recursively_load_weights(fairseq_checkpoint["""model"""] ,lowerCAmelCase__ ,lowerCAmelCase__ )
    model.save_pretrained(lowerCAmelCase__ )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(lowerCAmelCase__ )
        model.push_to_hub(lowerCAmelCase__ )


if __name__ == "__main__":
    # originally: parser
    __SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--task''',
        default='''s2t''',
        type=str,
        help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    # originally: args
    __SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
72
0
from collections.abc import Iterable from typing import Any class A_ : def __init__( self : Optional[int] , snake_case__ : int | None = None ): lowercase = value lowercase = None # Added in order to delete a node easier lowercase = None lowercase = None def __repr__( self : str ): from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class A_ : def __init__( self : int , snake_case__ : Node | None = None ): lowercase = root def __str__( self : str ): return str(self.root ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Node , snake_case__ : Node | None ): if new_children is not None: # reset its kids lowercase = node.parent if node.parent is not None: # reset its parent if self.is_right(__lowerCAmelCase ): # If it is the right children lowercase = new_children else: lowercase = new_children else: lowercase = new_children def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Node ): if node.parent and node.parent.right: return node == node.parent.right return False def SCREAMING_SNAKE_CASE__ ( self : str ): return self.root is None def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : List[Any] ): lowercase = Node(__lowerCAmelCase ) # create a new Node if self.empty(): # if Tree is empty lowercase = new_node # set its root else: # Tree is not empty lowercase = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: lowercase = new_node # We insert the new node in a leaf break else: lowercase = parent_node.left else: if parent_node.right is None: lowercase = new_node break else: lowercase = parent_node.right lowercase = parent_node def SCREAMING_SNAKE_CASE__ ( self : Any , *snake_case__ : Any ): for value in values: self.__insert(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : List[str] ): if 
self.empty(): raise IndexError("""Warning: Tree is empty! please use another.""" ) else: lowercase = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: lowercase = node.left if value < node.value else node.right return node def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Node | None = None ): if node is None: if self.root is None: return None lowercase = self.root if not self.empty(): while node.right is not None: lowercase = node.right return node def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Node | None = None ): if node is None: lowercase = self.root if self.root is None: return None if not self.empty(): lowercase = self.root while node.left is not None: lowercase = node.left return node def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : int ): lowercase = self.search(__lowerCAmelCase ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__lowerCAmelCase , __lowerCAmelCase ) elif node.left is None: # Has only right children self.__reassign_nodes(__lowerCAmelCase , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__lowerCAmelCase , node.left ) else: lowercase = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore lowercase = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Node | None ): if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Tuple=None ): if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : 
list , snake_case__ : Node | None ): if node: self.inorder(__lowerCAmelCase , node.left ) arr.append(node.value ) self.inorder(__lowerCAmelCase , node.right ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : Node ): lowercase = [] self.inorder(__lowerCAmelCase , __lowerCAmelCase ) # append all values to list using inorder traversal return arr[k - 1] def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [] if curr_node is not None: lowercase = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def UpperCamelCase__ ( ): lowercase = (8, 3, 6, 1, 10, 14, 13, 4, 7) lowercase = BinarySearchTree() for i in testlist: t.insert(UpperCAmelCase__ ) # Prints all the elements of the list in order traversal print(UpperCAmelCase__ ) if t.search(6 ) is not None: print("""The value 6 exists""" ) else: print("""The value 6 doesn't exist""" ) if t.search(-1 ) is not None: print("""The value -1 exists""" ) else: print("""The value -1 doesn't exist""" ) if not t.empty(): print("""Max Value: """ ,t.get_max().value ) # type: ignore print("""Min Value: """ ,t.get_min().value ) # type: ignore for i in testlist: t.remove(UpperCAmelCase__ ) print(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
710
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE : List[Any] ='''.''' if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') __SCREAMING_SNAKE_CASE : Dict =[] __SCREAMING_SNAKE_CASE : Dict =[] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE : Optional[Any] =line.strip() __SCREAMING_SNAKE_CASE : Tuple =os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE : Optional[Any] ='''\n'''.join(non_existent_paths) raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
72
0
# LayoutLMv3 processor: wraps a LayoutLMv3 image processor (OCR) and tokenizer
# into a single callable.
# NOTE(review): obfuscation artifacts throughout — the class name `A_`, the
# duplicate `snake_case__` parameter names (not valid Python as written), the
# undefined base `__UpperCAmelCase`, and `UpperCAmelCase_` used where the
# original passed named locals/flags. Original identifiers noted inline.
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A_ ( __UpperCAmelCase ):
    # ProcessorMixin configuration: attribute names and backing classes
    _A : Optional[Any] = ["""image_processor""", """tokenizer"""]
    _A : Tuple = """LayoutLMv3ImageProcessor"""
    _A : int = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")

    # originally: __init__(self, image_processor=None, tokenizer=None, **kwargs)
    def __init__( self : Any , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None , **snake_case__ : Union[str, Any] ):
        lowercase = None
        # back-compat: accept the deprecated `feature_extractor` kwarg
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , UpperCAmelCase_ , )
            lowercase = kwargs.pop("""feature_extractor""" )
        lowercase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )

    # originally: __call__(self, images, text=None, text_pair=None, boxes=None,
    #   word_labels=None, add_special_tokens=True, padding=False, truncation=None,
    #   max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
    #   return_attention_mask=None, return_overflowing_tokens=False,
    #   return_special_tokens_mask=False, return_offsets_mapping=False,
    #   return_length=False, verbose=True, return_tensors=None, **kwargs)
    # Runs the image processor (optionally with built-in OCR), then tokenizes.
    def __call__( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[int] = None , snake_case__ : int = None , snake_case__ : List[Any] = None , snake_case__ : Any = None , snake_case__ : Union[str, Any] = True , snake_case__ : Any = False , snake_case__ : Tuple = None , snake_case__ : List[Any] = None , snake_case__ : List[Any] = 0 , snake_case__ : List[str] = None , snake_case__ : List[str] = None , snake_case__ : Any = None , snake_case__ : Optional[int] = False , snake_case__ : Any = False , snake_case__ : List[str] = False , snake_case__ : List[Any] = False , snake_case__ : Optional[Any] = True , snake_case__ : List[str] = None , **snake_case__ : Any , ):
        # verify input: user-supplied boxes/labels conflict with built-in OCR
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        lowercase = self.image_processor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                lowercase = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase = features["""words"""]
        lowercase = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
        # add pixel values
        lowercase = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            # duplicate images so each overflowed chunk keeps its source image
            lowercase = self.get_overflowing_images(UpperCAmelCase_ , encoded_inputs["""overflow_to_sample_mapping"""] )
        lowercase = images
        return encoded_inputs

    # originally: get_overflowing_images(self, images, overflow_to_sample_mapping)
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F""" {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}""" )
        return images_with_overflow

    # originally: batch_decode — forwards to the tokenizer
    def SCREAMING_SNAKE_CASE__ ( self : Any , *snake_case__ : int , **snake_case__ : Union[str, Any] ):
        return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    # originally: decode — forwards to the tokenizer
    def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : int , **snake_case__ : str ):
        return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )

    # originally: model_input_names property
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    # originally: feature_extractor_class property (deprecated alias)
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , )
        return self.image_processor_class

    # originally: feature_extractor property (deprecated alias)
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase_ , )
        return self.image_processor
711
# Lazy-import module file for the ResNet model family: declares which symbols
# each backend (PyTorch / TensorFlow / Flax) contributes, importing nothing
# heavy until a symbol is actually accessed.
# NOTE(review): obfuscation rebound every assignment to `__SCREAMING_SNAKE_CASE`;
# the original names (_import_structure, referenced again at the bottom) are
# noted inline — as written the module is broken.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# originally: _import_structure — config symbols are always available
__SCREAMING_SNAKE_CASE : Tuple = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}

# PyTorch symbols, registered only if torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # originally: _import_structure["modeling_resnet"]
    __SCREAMING_SNAKE_CASE : Union[str, Any] = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]

# TensorFlow symbols
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # originally: _import_structure["modeling_tf_resnet"]
    __SCREAMING_SNAKE_CASE : Optional[Any] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]

# Flax symbols
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # originally: _import_structure["modeling_flax_resnet"]
    __SCREAMING_SNAKE_CASE : Union[str, Any] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # at runtime, replace this module with a lazy proxy
    __SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
72
0
# Distributed Trainer test: launched under torch.distributed, verifies that
# evaluation/prediction return every dataset sample exactly once and in order.
# NOTE(review): obfuscation artifacts — all three dummy classes are named `A_`
# (originally DummyDataset / DummyDataCollator / DummyModel), locals are bound
# to `lowercase`, and arguments appear as the undefined `UpperCamelCase__`.
# Original identifiers are noted inline; the file is broken as written.
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging

# originally: logger
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)

if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    # originally: DummyDataset — yields its own indices as samples
    class A_ ( UpperCamelCase_ ):
        def __init__( self : Union[str, Any] , snake_case__ : int = 1_01 ):
            # originally: self.length = length
            lowercase = length

        def __len__( self : Dict ):
            return self.length

        def __getitem__( self : int , snake_case__ : str ):
            # sample i is just the integer i — lets the test check ordering
            return i

    # originally: DummyDataCollator — batches indices into input_ids/labels
    class A_ :
        def __call__( self : Dict , snake_case__ : List[Any] ):
            return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )}

    # originally: DummyModel — echoes its inputs back as "predictions"
    class A_ ( nn.Module ):
        def __init__( self : List[str] ):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            lowercase = nn.Linear(1_20 , 80 )

        # originally: forward(self, input_ids, labels=None)
        def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Any=None ):
            if labels is not None:
                # zero loss + identity "logits"
                return torch.tensor(0.0 , device=input_ids.device ), input_ids
            else:
                return input_ids


# originally: TestTrainerDistributedNeuronCore — relaunches this file via torchrun
class A_ ( UpperCamelCase_ ):
    @require_torch_neuroncore
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        lowercase = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split()
        lowercase = self.get_auto_remove_tmp_dir()
        lowercase = F"""--output_dir {output_dir}""".split()
        lowercase = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call


# originally: TestTrainerDistributed — same, with one process per GPU
class A_ ( UpperCamelCase_ ):
    @require_torch_multi_gpu
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        lowercase = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split()
        lowercase = self.get_auto_remove_tmp_dir()
        lowercase = F"""--output_dir {output_dir}""".split()
        lowercase = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    # originally: parser / training_args
    __SCREAMING_SNAKE_CASE : Any = HfArgumentParser((TrainingArguments,))
    __SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        # originally: dataset
        __SCREAMING_SNAKE_CASE : List[str] = DummyDataset(dataset_length)

        # originally: compute_metrics(p: EvalPrediction) -> Dict
        def UpperCamelCase__ ( lowerCAmelCase__ ):
            # predictions/labels must equal 0..len-1 in order
            lowercase = list(range(len(_lowercase ) ) )
            lowercase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    """Predictions and/or labels do not match expected results:\n  - predictions: """
                    f"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"""
                )
            return {"success": success}

        # originally: trainer
        __SCREAMING_SNAKE_CASE : Any = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        __SCREAMING_SNAKE_CASE : str = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        __SCREAMING_SNAKE_CASE : Tuple = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # re-run with batched eval accumulation to cover that code path
        __SCREAMING_SNAKE_CASE : Any = 2
        __SCREAMING_SNAKE_CASE : Any = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        __SCREAMING_SNAKE_CASE : Optional[int] = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # originally: trainer.args.eval_accumulation_steps = None (reset)
        __SCREAMING_SNAKE_CASE : List[str] = None
712
# Release helper: bumps the version string across the repo (init, setup, docs,
# examples) for a release, and bumps to the next dev version afterwards.
# NOTE(review): function and variable identifiers were machine-mangled
# (`UpperCamelCase__`, `lowercase`, ...); call sites still use the original
# names (`update_version_in_file`, `get_version`, ...) — reconcile with the
# pristine file before relying on this text.
import argparse
import os
import re

import packaging.version


# Root of the examples tree scanned by the examples updater.
__SCREAMING_SNAKE_CASE : Optional[int] ='''examples/'''
# Per-target (pattern, replacement-template) pairs; "VERSION" is substituted.
__SCREAMING_SNAKE_CASE : Any ={
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
# Target name -> file whose version line is rewritten.
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
__SCREAMING_SNAKE_CASE : Any ='''README.md'''


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    """Rewrite the version inside one file using REPLACE_PATTERNS[pattern].

    Args (mangled): fname, version, pattern.
    """
    with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        lowercase = f.read()
    lowercase , lowercase = REPLACE_PATTERNS[pattern]
    # Inject the concrete version into the replacement template.
    lowercase = replace.replace("""VERSION""" ,lowerCAmelCase__ )
    lowercase = re_pattern.sub(lowerCAmelCase__ ,lowerCAmelCase__ )
    with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        f.write(lowerCAmelCase__ )


def UpperCamelCase__ ( lowerCAmelCase__ ):
    """Walk the examples tree and bump `check_min_version` in every .py file."""
    for folder, directories, fnames in os.walk(lowerCAmelCase__ ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ,pattern="""examples""" )


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
    """Bump the version everywhere; skip examples for a patch release."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
    if not patch:
        update_version_in_examples(lowerCAmelCase__ )


def UpperCamelCase__ ( ):
    """Point README model-list doc links at the stable docs instead of `main`."""
    lowercase = """🤗 Transformers currently provides the following architectures"""
    lowercase = """1. Want to contribute a new model?"""
    with open(lowerCAmelCase__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        lowercase = f.readlines()
    # Find the start of the list.
    lowercase = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    lowercase = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lowercase = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,)
        index += 1
    with open(lowerCAmelCase__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
        f.writelines(lowerCAmelCase__ )


def UpperCamelCase__ ( ):
    """Parse the current version out of the package __init__ (REPLACE_FILES['init'])."""
    with open(REPLACE_FILES["""init"""] ,"""r""" ) as f:
        lowercase = f.read()
    # Group 1 of the 'init' pattern captures the version string itself.
    lowercase = REPLACE_PATTERNS["""init"""][0].search(lowerCAmelCase__ ).groups()[0]
    return packaging.version.parse(lowerCAmelCase__ )


def UpperCamelCase__ ( lowerCAmelCase__=False ):
    """Interactive pre-release flow: pick the release version and apply it."""
    lowercase = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        # Dropping the .dev0 suffix gives the release version.
        lowercase = default_version.base_version
    elif patch:
        lowercase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        lowercase = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    lowercase = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(lowerCAmelCase__ ) == 0:
        lowercase = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(lowerCAmelCase__ ,patch=lowerCAmelCase__ )
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
        clean_main_ref_in_model_list()


def UpperCamelCase__ ( ):
    """Interactive post-release flow: move the repo to the next .dev0 version."""
    lowercase = get_version()
    lowercase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    lowercase = current_version.base_version
    # Check with the user we got that right.
    lowercase = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(lowerCAmelCase__ ) == 0:
        lowercase = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(lowerCAmelCase__ )
    print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    __SCREAMING_SNAKE_CASE : Optional[int] =parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
72
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE : Any ={ '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''], '''tokenization_roc_bert''': ['''RoCBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any =[ '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoCBertForCausalLM''', '''RoCBertForMaskedLM''', '''RoCBertForMultipleChoice''', '''RoCBertForPreTraining''', '''RoCBertForQuestionAnswering''', '''RoCBertForSequenceClassification''', '''RoCBertForTokenClassification''', '''RoCBertLayer''', '''RoCBertModel''', '''RoCBertPreTrainedModel''', '''load_tf_weights_in_roc_bert''', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __SCREAMING_SNAKE_CASE : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
713
# Pix2Struct configuration classes (text decoder, vision encoder, composite).
# NOTE(review): identifiers were machine-mangled (`A_`, `lowercase`, `_A`,
# `PixaStruct*`, `__a`); assignment targets of the `lowercase = ...` lines are
# lost — attribute names below are reconstructed only where the reading side
# makes them unambiguous. Reconcile against the pristine file.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__)

# Canonical pretrained-config download locations.
__SCREAMING_SNAKE_CASE : Tuple ={
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}


class A_ ( __a ):
    """Configuration of the Pix2Struct text decoder (T5-style)."""

    _A :List[str] = '''pix2struct_text_model'''
    _A :int = ['''past_key_values''']
    # Maps the common HF attribute names onto this config's field names.
    _A :Optional[Any] = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self : int , snake_case__ : str=5_02_44 , snake_case__ : Dict=7_68 , snake_case__ : Optional[Any]=64 , snake_case__ : Union[str, Any]=20_48 , snake_case__ : Union[str, Any]=12 , snake_case__ : str=12 , snake_case__ : int=32 , snake_case__ : List[Any]=1_28 , snake_case__ : Optional[int]=0.1 , snake_case__ : int=1E-6 , snake_case__ : int=1.0 , snake_case__ : Dict="gelu_new" , snake_case__ : Union[str, Any]=0 , snake_case__ : str=False , snake_case__ : List[str]=0 , snake_case__ : str=1 , snake_case__ : Optional[Any]=False , snake_case__ : Tuple=True , **snake_case__ : List[str] , ):
        # Store the decoder hyper-parameters.  The reading side names them
        # vocab_size, hidden_size, d_kv, d_ff, num_layers, num_heads,
        # relative_attention_num_buckets, relative_attention_max_distance,
        # dropout_rate, layer_norm_epsilon, initializer_factor, use_cache,
        # eos_token_id, decoder_start_token_id, dense_act_fn.
        lowercase = vocab_size
        lowercase = hidden_size
        lowercase = d_kv
        lowercase = d_ff
        lowercase = num_layers
        lowercase = num_heads
        lowercase = relative_attention_num_buckets
        lowercase = relative_attention_max_distance
        lowercase = dropout_rate
        lowercase = layer_norm_epsilon
        lowercase = initializer_factor
        lowercase = use_cache
        lowercase = eos_token_id
        lowercase = decoder_start_token_id
        # for backwards compatibility
        lowercase = dense_act_fn
        super().__init__(
            pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ):
        """Load this text config, unwrapping it from a composite Pix2Struct config."""
        cls._set_token_in_kwargs(snake_case__ )
        lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            lowercase = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(snake_case__ , **snake_case__ )


class A_ ( __a ):
    """Configuration of the Pix2Struct vision encoder."""

    _A :Optional[int] = '''pix2struct_vision_model'''

    def __init__( self : Tuple , snake_case__ : Union[str, Any]=7_68 , snake_case__ : Any=7_68 , snake_case__ : Dict=20_48 , snake_case__ : int=64 , snake_case__ : str=12 , snake_case__ : Optional[int]=12 , snake_case__ : Union[str, Any]="gelu_new" , snake_case__ : Union[str, Any]=1E-6 , snake_case__ : int=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Optional[int]=1E-10 , snake_case__ : Optional[int]=1.0 , snake_case__ : Optional[Any]=40_96 , snake_case__ : Optional[int]=32 , snake_case__ : List[Any]=1_28 , **snake_case__ : Union[str, Any] , ):
        super().__init__(**snake_case__ )
        # Vision-encoder hyper-parameters (names per the reading side):
        # hidden_size, patch_embed_hidden_size, d_ff, dropout_rate,
        # num_hidden_layers, num_attention_heads, initializer_range,
        # initializer_factor, attention_dropout, layer_norm_eps, dense_act_fn,
        # seq_len, relative_attention_num_buckets,
        # relative_attention_max_distance, d_kv.
        lowercase = hidden_size
        lowercase = patch_embed_hidden_size
        lowercase = d_ff
        lowercase = dropout_rate
        lowercase = num_hidden_layers
        lowercase = num_attention_heads
        lowercase = initializer_range
        lowercase = initializer_factor
        lowercase = attention_dropout
        lowercase = layer_norm_eps
        lowercase = dense_act_fn
        lowercase = seq_len
        lowercase = relative_attention_num_buckets
        lowercase = relative_attention_max_distance
        lowercase = d_kv

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ):
        """Load this vision config, unwrapping it from a composite Pix2Struct config."""
        cls._set_token_in_kwargs(snake_case__ )
        lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("""model_type""" ) == "pix2struct":
            lowercase = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(snake_case__ , **snake_case__ )


class A_ ( __a ):
    """Composite Pix2Struct configuration: nested text + vision configs."""

    _A :int = '''pix2struct'''
    _A :str = True

    def __init__( self : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=1.0 , snake_case__ : Any=0.02 , snake_case__ : Tuple=False , snake_case__ : Union[str, Any]=False , snake_case__ : Tuple=True , **snake_case__ : int , ):
        super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
        # Default each sub-config to an empty dict (=> all defaults) when absent.
        if text_config is None:
            lowercase = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
        if vision_config is None:
            lowercase = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        lowercase = PixaStructTextConfig(**snake_case__ )
        lowercase = PixaStructVisionConfig(**snake_case__ )
        # Mirror the decoder's special token ids at the top level.
        lowercase = self.text_config.decoder_start_token_id
        lowercase = self.text_config.pad_token_id
        lowercase = self.text_config.eos_token_id
        lowercase = initializer_factor
        lowercase = initializer_range
        # Propagate initializer_range into both sub-configs
        # (assignment targets lost in mangling — TODO confirm).
        lowercase = self.initializer_range
        lowercase = self.initializer_range
        lowercase = is_vqa

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls : Tuple , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Any ):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """Serialize to a plain dict, with sub-configs expanded in place."""
        lowercase = copy.deepcopy(self.__dict__ )
        lowercase = self.text_config.to_dict()
        lowercase = self.vision_config.to_dict()
        lowercase = self.__class__.model_type
        return output
72
0
# CANINE tokenizer: character-level, every Unicode codepoint is a token id.
# NOTE(review): identifiers were machine-mangled; the special-codepoint
# constant names (CLS, SEP, BOS, MASK, PAD, RESERVED) were destroyed by the
# mangler (all assigned to `__SCREAMING_SNAKE_CASE`), and the `chr(snake_case__)`
# default arguments below are clearly corrupted (originally chr(CLS) etc.) —
# reconcile against the pristine file.
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE : str ={
    '''nielsr/canine-s''': 2_048,
}

# Unicode defines 1,114,112 total “codepoints”
__SCREAMING_SNAKE_CASE : int =1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__SCREAMING_SNAKE_CASE : Optional[int] =0
__SCREAMING_SNAKE_CASE : Union[str, Any] =0XE_0_0_0
__SCREAMING_SNAKE_CASE : Optional[Any] =0XE_0_0_1
__SCREAMING_SNAKE_CASE : str =0XE_0_0_2
__SCREAMING_SNAKE_CASE : Union[str, Any] =0XE_0_0_3
__SCREAMING_SNAKE_CASE : Any =0XE_0_0_4

# Maps special codepoints to human-readable names.
__SCREAMING_SNAKE_CASE : str ={
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: '''[CLS]''',
    SEP: '''[SEP]''',
    BOS: '''[BOS]''',
    MASK: '''[MASK]''',
    PAD: '''[PAD]''',
    RESERVED: '''[RESERVED]''',
}

# Maps special codepoint human-readable names to their codepoint values.
__SCREAMING_SNAKE_CASE : Dict ={name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A_ ( UpperCAmelCase__ ):
    """Character-level tokenizer: token id == Unicode codepoint of the character."""

    _A :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : Union[str, Any] , snake_case__ : Any=chr(snake_case__ ) , snake_case__ : List[str]=chr(snake_case__ ) , snake_case__ : List[str]=chr(snake_case__ ) , snake_case__ : Dict=chr(snake_case__ ) , snake_case__ : Tuple=chr(snake_case__ ) , snake_case__ : Any=chr(snake_case__ ) , snake_case__ : Optional[Any]=False , snake_case__ : Any=20_48 , **snake_case__ : Any , ):
        # Wrap every plain-string special token in AddedToken with the same
        # lstrip/rstrip behavior.
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
        super().__init__(
            bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , model_max_length=snake_case__ , **snake_case__ , )
        # Creates a mapping for looking up the IDs of special symbols.
        lowercase = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            lowercase = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        lowercase = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        lowercase = UNICODE_VOCAB_SIZE
        lowercase = len(self._special_codepoints )

    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # vocab_size: every Unicode codepoint is a valid id.
        return self._unicode_vocab_size

    def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : str ):
        # Tokenize = split into individual characters.
        return list(snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : str ):
        # token -> id is just ord(); multi-char tokens raise.
        try:
            return ord(snake_case__ )
        except TypeError:
            raise ValueError(F"""invalid token: \'{token}\'""" )

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int ):
        # id -> token is chr(), except the named special codepoints.
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(snake_case__ )
        except TypeError:
            raise ValueError(F"""invalid id: {index}""" )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[int] ):
        # Characters concatenate back into the original string.
        return "".join(snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
        # Build [CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP]).
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        lowercase = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
        lowercase = [1] + ([0] * len(snake_case__ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(snake_case__ )) + [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        lowercase = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
        # No vocabulary file to save: the "vocab" is the Unicode standard itself.
        return ()
714
# Accelerate integration-test script for `no_sync` / `accumulate` gradient
# accumulation semantics across single-process and distributed setups.
# NOTE(review): every function was mangled to `UpperCamelCase__` (so later defs
# shadow earlier ones at module level) and locals to `lowercase`; call sites
# still use the original names (step_model, get_training_setup, ...).
# Reflowed with reconstructed indentation — reconcile with the pristine file.
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    """Assert per-parameter gradient (de)synchronization between two models.

    Args (mangled): model_a, model_b, did_step, iteration.
    """
    for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad ,grad_param.grad ) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad ,grad_param.grad ) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
    """One forward/backward step with an MSE loss (step_model).

    When do_backward is False, the loss is manually scaled by the accumulation
    step count and plain `.backward()` is used; otherwise
    `accelerator.backward` handles scaling.
    """
    model.train()
    lowercase = model(lowerCAmelCase__ )
    lowercase = F.mse_loss(lowerCAmelCase__ ,target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(lowerCAmelCase__ )


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ):
    """Build (model, ddp_model, dataloader[, optimizers/schedulers]) fixtures."""
    set_seed(42 )
    lowercase = RegressionModel()
    lowercase = deepcopy(lowerCAmelCase__ )
    lowercase = RegressionDataset(length=80 )
    lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 )
    model.to(accelerator.device )
    if sched:
        lowercase = AdamW(params=model.parameters() ,lr=1E-3 )
        lowercase = AdamW(params=ddp_model.parameters() ,lr=1E-3 )
        lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 )
        lowercase = LambdaLR(lowerCAmelCase__ ,lr_lambda=lambda lowerCAmelCase__ : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        lowercase , lowercase , lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
    else:
        lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # Test when on a single CPU or GPU that the context manager does nothing
    lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ )
    # Use a single batch
    lowercase , lowercase = next(iter(lowerCAmelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) )
        lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(lowerCAmelCase__ ):
                step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        else:
            # Sync grads
            step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad ,ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # Test on distributed setup that context manager behaves properly
    lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ )
    # Use a single batch
    lowercase , lowercase = next(iter(lowerCAmelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) )
        lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(lowerCAmelCase__ ):
                step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        else:
            # Sync grads
            step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad ,ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad ,ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]


def UpperCamelCase__ ( lowerCAmelCase__=False ,lowerCAmelCase__=False ):
    """Verify `accelerator.accumulate` syncs grads only on accumulation boundaries."""
    lowercase = Accelerator(
        split_batches=lowerCAmelCase__ ,dispatch_batches=lowerCAmelCase__ ,gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ )
    for iteration, batch in enumerate(lowerCAmelCase__ ):
        lowercase , lowercase = batch.values()
        # Gather the distributed inputs and targs for the base model
        lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) )
        lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(lowerCAmelCase__ ):
            step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() ,ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase__ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad ,ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad ,ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        lowercase = ddp_input[torch.randperm(len(lowerCAmelCase__ ) )]
    GradientState._reset_state()


def UpperCamelCase__ ( lowerCAmelCase__=False ,lowerCAmelCase__=False ):
    """Same as above but with an optimizer + LR scheduler in the loop."""
    lowercase = Accelerator(
        split_batches=lowerCAmelCase__ ,dispatch_batches=lowerCAmelCase__ ,gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = get_training_setup(lowerCAmelCase__ ,lowerCAmelCase__ )
    for iteration, batch in enumerate(lowerCAmelCase__ ):
        lowercase , lowercase = batch.values()
        # Gather the distributed inputs and targs for the base model
        lowercase , lowercase = accelerator.gather((ddp_input, ddp_target) )
        lowercase , lowercase = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase__ )):
            if split_batches:
                sched.step()
            else:
                # Without split batches the reference scheduler must be stepped
                # once per process to match the prepared (wrapped) scheduler.
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(lowerCAmelCase__ ):
            step_model(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        lowercase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase__ ))
        if accelerator.num_processes > 1:
            check_model_parameters(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
    GradientState._reset_state()


def UpperCamelCase__ ( ):
    """Check GradientState tracks the active dataloader through nested iteration."""
    lowercase = Accelerator()
    lowercase = RegressionDataset(length=80 )
    lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 )
    lowercase = RegressionDataset(length=96 )
    lowercase = DataLoader(lowerCAmelCase__ ,batch_size=16 )
    lowercase , lowercase = accelerator.prepare(lowerCAmelCase__ ,lowerCAmelCase__ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(lowerCAmelCase__ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
        if iteration < len(lowerCAmelCase__ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested loop over the second dataloader: the state must switch
                # to it and back.
                for batch_num, _ in enumerate(lowerCAmelCase__ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase__ )
                    if batch_num < len(lowerCAmelCase__ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def UpperCamelCase__ ( ):
    """Entry point: run the sub-tests appropriate for the current distributed setup."""
    lowercase = Accelerator()
    lowercase = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(lowerCAmelCase__ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(lowerCAmelCase__ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ ,f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation(lowerCAmelCase__ ,lowerCAmelCase__ )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" ,"""2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,"""`split_batches=False`, `dispatch_batches=False`**""" ,)
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase__ ,lowerCAmelCase__ )


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
72
0
from statistics import mean, stdev def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ): lowercase = min(SCREAMING_SNAKE_CASE_ ) lowercase = max(SCREAMING_SNAKE_CASE_ ) # normalize data return [round((x - x_min) / (x_max - x_min) ,SCREAMING_SNAKE_CASE_ ) for x in data] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 3 ): lowercase = mean(SCREAMING_SNAKE_CASE_ ) lowercase = stdev(SCREAMING_SNAKE_CASE_ ) # standardize data return [round((x - mu) / (sigma) ,SCREAMING_SNAKE_CASE_ ) for x in data]
715
# NOTE(review): obfuscated/flattened copy of the Hugging Face `transformers`
# AutoProcessor test suite (save/load round-trips, trust_remote_code dynamic
# processors, custom-class registration, and Hub push/staging tests).
# The statement structure has been collapsed onto single physical lines and
# local names were rewritten (`lowercase`, `snake_case__`, `A_`), so the
# `lowercase = ...` assignments no longer bind the names the following
# statements read — this chunk is not runnable Python as-is.  Left
# byte-identical; restore from the upstream test file rather than hand-editing.
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''') class A_ ( unittest.TestCase ): _A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = 0 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig() lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder 
model_config.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : int ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in tokenizer with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in feature extractor with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) 
self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(snake_case__ ) # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write("""{}""" ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) lowercase = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) lowercase = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) lowercase = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) 
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoProcessor.register(snake_case__ , snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): class A_ ( __a ): _A :List[str] = False class A_ ( __a ): _A :Dict = False class A_ ( __a ): _A :Union[str, Any] = '''AutoFeatureExtractor''' _A :Tuple = '''AutoTokenizer''' _A 
:Optional[Any] = False try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local classes. lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class A_ ( unittest.TestCase ): _A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ): lowercase = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ): try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) 
with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token ) lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , ) lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token ) lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token ) 
processor.save_pretrained(snake_case__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f: lowercase = json.load(snake_case__ ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) ) repo.push_to_hub() lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
72
0
# NOTE(review): obfuscated/flattened copy of the `transformers` CLIPSegProcessor
# test suite (tokenizer/image-processor save-load round-trips, kwargs override,
# text/image/visual-prompt encoding, batch_decode).  Local names were rewritten
# to `lowercase`/`A_`/`snake_case__`, so assignments no longer bind the names
# later statements read (e.g. `lowercase = self.get_tokenizer()` followed by
# uses of `tokenizer`) — not runnable as-is.  Left byte-identical; restore from
# the upstream test file rather than hand-editing.
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class A_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowercase = tempfile.mkdtemp() # fmt: off lowercase = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase = dict(zip(A_ , range(len(A_ ) ) ) ) lowercase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase = {"unk_token": "<unk>"} lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A_ ) ) lowercase = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowercase = os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(A_ , A_ ) def SCREAMING_SNAKE_CASE__ ( self : Dict , **snake_case__ : int ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ ) def SCREAMING_SNAKE_CASE__ ( self : int , **snake_case__ : Optional[Any] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def 
SCREAMING_SNAKE_CASE__ ( self : Tuple , **snake_case__ : List[str] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowercase = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase = self.get_tokenizer() lowercase = self.get_rust_tokenizer() lowercase = self.get_image_processor() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) processor_slow.save_pretrained(self.tmpdirname ) lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) processor_fast.save_pretrained(self.tmpdirname ) lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A_ ) self.assertIsInstance(processor_fast.tokenizer , A_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A_ ) self.assertIsInstance(processor_fast.image_processor , A_ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowercase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) 
lowercase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) lowercase = self.prepare_image_inputs() lowercase = image_processor(A_ , return_tensors="""np""" ) lowercase = processor(images=A_ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) lowercase = "lower newer" lowercase = processor(text=A_ ) lowercase = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) lowercase = "lower newer" lowercase = self.prepare_image_inputs() lowercase = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def SCREAMING_SNAKE_CASE__ ( self : int ): lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) lowercase = self.prepare_image_inputs() 
lowercase = self.prepare_image_inputs() lowercase = processor(images=A_ , visual_prompt=A_ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase = processor.batch_decode(A_ ) lowercase = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ )
716
# NOTE(review): obfuscated/flattened pytest suite for `datasets`
# DatasetInfo / DatasetInfosDict (directory round-trips, YAML dict conversion,
# backward compat with dataset_infos.json).  The `lowercase = ...` assignments
# were produced by name-mangling and do not bind the names the following
# statements read (e.g. `dataset_infos_dir`, `dataset_info_yaml_dict`), so
# this chunk is not runnable as-is.  Left byte-identical; restore from the
# upstream `tests/test_info.py` rather than hand-editing.
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" ,[ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" ,"""w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" ,[ DatasetInfo(), DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_info.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfo.from_directory(lowerCAmelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""dataset_info.json""" ) ) def UpperCamelCase__ ( ): lowercase = DatasetInfo( description="""foo""" ,citation="""bar""" ,homepage="""https://foo.bar""" ,license="""CC0""" ,features=Features({"""a""": Value("""int32""" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="""builder""" 
,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train""", """num_examples""": 42}] ,download_checksums={} ,download_size=1_337 ,post_processing_size=442 ,dataset_size=1_234 ,size_in_bytes=1_337 + 442 + 1_234 ,) lowercase = dataset_info._to_yaml_dict() assert sorted(lowerCAmelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) ) lowercase = yaml.safe_dump(lowerCAmelCase__ ) lowercase = yaml.safe_load(lowerCAmelCase__ ) assert dataset_info_yaml_dict == reloaded def UpperCamelCase__ ( ): lowercase = DatasetInfo() lowercase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" ,[ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1_337 ), } ), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = str(lowerCAmelCase__ ) dataset_infos_dict.write_to_directory(lowerCAmelCase__ ) lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowercase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert 
os.path.exists(os.path.join(lowerCAmelCase__ ,"""README.md""" ) )
72
0
# NOTE(review): obfuscated/flattened copy of accelerate's keyboard-input
# helper: two decorators that attach a `handle_key` list to a function, plus a
# metaclass (presumably `KeyHandler` — the name is referenced but the class was
# renamed `A_` by the obfuscator) that collects those handlers into a
# `key_handler` dict and dispatches on `get_character()` / KEYMAP codes.
# Name-mangling broke the bindings (`lowercase = getattr(...)` followed by
# `handle += [key]`; `lowercase = value` inside the metaclass loop never fills
# `key_handler`), so this chunk is not runnable as-is.  Left byte-identical;
# restore from upstream `accelerate` (`commands/menu/input.py`) rather than
# hand-editing.
from typing import List from .keymap import KEYMAP, get_character def UpperCamelCase__ ( lowerCAmelCase__ ): def decorator(lowerCAmelCase__ ): lowercase = getattr(lowerCAmelCase__ ,"""handle_key""" ,[] ) handle += [key] setattr(lowerCAmelCase__ ,"""handle_key""" ,lowerCAmelCase__ ) return func return decorator def UpperCamelCase__ ( *lowerCAmelCase__ ): def decorator(lowerCAmelCase__ ): lowercase = getattr(lowerCAmelCase__ ,"""handle_key""" ,[] ) handle += keys setattr(lowerCAmelCase__ ,"""handle_key""" ,lowerCAmelCase__ ) return func return decorator class A_ ( UpperCamelCase__ ): def __new__( cls : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str ): lowercase = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ ) if not hasattr(snake_case__ , """key_handler""" ): setattr(snake_case__ , """key_handler""" , {} ) setattr(snake_case__ , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): lowercase = getattr(snake_case__ , """handle_key""" , [] ) for key in handled_keys: lowercase = value return new_cls @staticmethod def SCREAMING_SNAKE_CASE__ ( cls : Any ): lowercase = get_character() if char != KEYMAP["undefined"]: lowercase = ord(snake_case__ ) lowercase = cls.key_handler.get(snake_case__ ) if handler: lowercase = char return handler(cls ) else: return None def UpperCamelCase__ ( cls ): return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
717
# NOTE(review): obfuscated/flattened copy of the `transformers` movement-pruning
# "bertarize" script: loads a fine-pruned checkpoint, applies one of four
# binarizers from the non-stdlib `emmental` package (magnitude / topK /
# sigmoied_threshold / l0) to each prunable weight, and saves the pruned
# state dict.  Name-mangling broke the bindings (`lowercase = args.pruning_method`
# followed by reads of `pruning_method`, `model`, `pruned_model`, etc.), so
# this chunk is not runnable as-is.  Left byte-identical; restore from the
# upstream examples/research_projects/movement-pruning/bertarize.py rather
# than hand-editing.
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = args.pruning_method lowercase = args.threshold lowercase = args.model_name_or_path.rstrip("""/""" ) lowercase = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) lowercase = torch.load(os.path.join(lowerCAmelCase__ ,"""pytorch_model.bin""" ) ) lowercase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowercase = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowercase = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: lowercase = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowercase = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ ,threshold=lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase = TopKBinarizer.apply(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase = ThresholdBinarizer.apply(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowercase = name[:-6] lowercase = model[f"""{prefix_}mask_scores"""] lowercase , lowercase = -0.1, 1.1 lowercase = torch.sigmoid(lowerCAmelCase__ ) lowercase = s * (r - l) + l lowercase = s_bar.clamp(min=0.0 ,max=1.0 ) lowercase = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError("""Unknown pruning method""" ) if 
target_model_path is None: lowercase = os.path.join( os.path.dirname(lowerCAmelCase__ ) ,f"""bertarized_{os.path.basename(lowerCAmelCase__ )}""" ) if not os.path.isdir(lowerCAmelCase__ ): shutil.copytree(lowerCAmelCase__ ,lowerCAmelCase__ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowerCAmelCase__ ,os.path.join(lowerCAmelCase__ ,"""pytorch_model.bin""" ) ) print("""\nPruned model saved! See you later!""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =argparse.ArgumentParser() parser.add_argument( '''--pruning_method''', choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''], type=str, required=True, help=( '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,''' ''' sigmoied_threshold = Soft movement pruning)''' ), ) parser.add_argument( '''--threshold''', type=float, required=False, help=( '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.''' '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.''' '''Not needed for `l0`''' ), ) parser.add_argument( '''--model_name_or_path''', type=str, required=True, help='''Folder containing the model that was previously fine-pruned''', ) parser.add_argument( '''--target_model_path''', default=None, type=str, required=False, help='''Folder containing the model that was previously fine-pruned''', ) __SCREAMING_SNAKE_CASE : str =parser.parse_args() main(args)
72
0
def base16_encode(data: bytes) -> str:
    """Encode *data* as an uppercase base16 (hex) string, per RFC 3548 §6.

    >>> base16_encode(b"Hello World!")
    '48656C6C6F20576F726C6421'
    """
    # NOTE(fix): the obfuscated original took `lowerCAmelCase__` but its body
    # read the undefined name `__lowerCAmelCase` (NameError); names restored.
    # Two uppercase hex digits per byte, zero-padded on the left.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes.

    Raises:
        ValueError: if *data* has an odd number of digits or contains
            characters outside the uppercase base16 alphabet.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:\nData does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
718
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first walk that consumes each undirected edge exactly once.

    Returns the vertex sequence of the walk starting at *u*.  *visited_edge*
    is a (max_node+1) x (max_node+1) boolean matrix shared across the
    recursive calls.
    """
    # NOTE(fix): the obfuscated original assigned `lowercase , lowercase =
    # True, True` instead of marking `visited_edge[u][v]` / `[v][u]`, so
    # edges were never consumed (infinite recursion); bindings restored.
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions: the graph is undirected.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify *graph* by its count of odd-degree vertices.

    Returns (1, odd_node) when every vertex has even degree (Euler circuit),
    (2, odd_node) when exactly two are odd (Euler path), (3, odd_node)
    otherwise.  *odd_node* is the last odd-degree vertex seen, or -1.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether *graph* has an Euler cycle/path and print one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Exercise check_euler on a handful of sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []  # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
72
0
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fix: the path was assigned to the mangled name `__SCREAMING_SNAKE_CASE` while
# the very next line read the undefined `git_repo_path` (NameError at import).
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def UpperCamelCase__(lowerCAmelCase__):
    """pytest_addoption-style hook: forward the option parser to diffusers' shared helper."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    # Fix: the original forwarded the undefined name `lowercase__` instead of the parameter.
    pytest_addoption_shared(lowerCAmelCase__)


def UpperCamelCase__(lowerCAmelCase__):  # NOTE(review): shadows the hook above (same obfuscated name)
    """pytest_terminal_summary-style hook: write report files when --make-reports is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = lowerCAmelCase__.config.getoption("""--make-reports""")
    if make_reports:
        # Fix: `id` must be the report id (the --make-reports value),
        # not the terminal reporter object itself.
        pytest_terminal_summary_main(lowerCAmelCase__, id=make_reports)
719
# NOTE(review): flattened, identifier-mangled copy of a FlaxRoFormer test module
# (depends on `transformers`, `flax`/`jax` and a relative project import, so it is
# documented here rather than rewritten). The mangling rebinds every local and
# attribute assignment to the throwaway name `lowercase`, so e.g. `__init__`
# never stores `parent`, `batch_size`, ... on `self`, and
# `prepare_config_and_inputs` returns names (`config`, `input_ids`, ...) that are
# never defined — restoring it requires the upstream source file. The three
# classes are: a model tester that builds config/input fixtures, a ModelTesterMixin
# subclass listing all Flax RoFormer head classes, and a slow integration test
# checking MaskedLM logits against hard-coded values.
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class A_ ( unittest.TestCase ): def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : List[str]=13 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : List[Any]=True , snake_case__ : Optional[int]=99 , snake_case__ : Any=32 , snake_case__ : Any=5 , snake_case__ : int=4 , snake_case__ : Optional[Any]=37 , snake_case__ : Dict="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=5_12 , snake_case__ : Optional[Any]=16 , snake_case__ : List[Any]=2 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : List[str]=4 , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_attention_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_choices def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_attention_mask: lowercase = random_attention_mask([self.batch_size, 
self.seq_length] ) lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase = config_and_inputs lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A_ ( __a , unittest.TestCase ): _A :List[Any] = True _A :Union[str, Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowercase = FlaxRoFormerModelTester(self ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowercase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=snake_case__ ) lowercase = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ ) @require_flax class A_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowercase = model(snake_case__ )[0] lowercase 
= 5_00_00 lowercase = (1, 6, vocab_size) self.assertEqual(output.shape , snake_case__ ) lowercase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
72
0
def UpperCamelCase__(first, second):
    """Return the sum of two non-negative integers using only bitwise operations.

    Classic carry-propagation addition: XOR adds without carry, AND + left
    shift produces the carry; repeat until no carry remains.

    NOTE(review): with Python's unbounded ints this would loop forever for a
    negative result; callers are expected to pass non-negative values.
    """
    # Fixes: the original signature repeated the same parameter name twice
    # (a SyntaxError), assigned the carry to a throwaway local that was never
    # used to update `second`, and shifted the undefined name `c`.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


# The interactive code below refers to the function by its original name.
add = UpperCamelCase__

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
720
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class A_:
    """Pure-Python SHA-1 (FIPS 180-4) over a bytes message.

    NOTE(review): the obfuscation pass this file went through had collapsed
    every attribute assignment onto a throwaway local named `lowercase`,
    losing `self.data` / `self.h`; the attributes are restored here.
    """

    def __init__(self, data):
        self.data = data
        # Initial hash state H0..H4 from the SHA-1 specification.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit value *n* by *b* bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Append the 0x80/zero padding and the 64-bit big-endian bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the compression function over every block; return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round-dependent boolean function f and constant k.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


# The module-level functions below refer to the class by its original name.
SHAaHash = A_


def UpperCamelCase__():
    """Self-test: compare against hashlib's SHA-1 on a known message."""
    msg = b"Test String"
    # Fix: the original compared against the undefined `hashlib.shaa`.
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def UpperCamelCase__():  # NOTE(review): shadows the self-test above (same obfuscated name)
    """CLI entry point: hash --string, or the contents of --file when given."""
    parser = argparse.ArgumentParser(description="""Process some strings or files""")
    parser.add_argument(
        """--string""",
        dest="""input_string""",
        default="""Hello World!! Welcome to Cryptography""",
        help="""Hash the string""",
    )
    parser.add_argument("""--file""", dest="""input_file""", help="""Hash contents of a file""")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, """rb""") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, """utf-8""")
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    UpperCamelCase__()  # latest binding of the name: the CLI entry point
    import doctest

    doctest.testmod()
72
0
# NOTE(review): flattened copy of the DeiT `__init__.py` lazy-module shim from
# transformers (depends on `...utils._LazyModule`, so documented rather than
# rewritten). The mangling broke it: the import-structure dict and the optional
# sub-lists are all assigned to `__SCREAMING_SNAKE_CASE` (each assignment
# clobbering the previous one instead of populating `_import_structure[...]`),
# and the final `_LazyModule(..., _import_structure, ...)` call references a
# name that is never defined. Restoring it requires the upstream file.
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __SCREAMING_SNAKE_CASE : Tuple ={"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[Any] =["DeiTFeatureExtractor"] __SCREAMING_SNAKE_CASE : Any =["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any =[ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple =[ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
721
class Node:
    """A named value; ordering compares `val` only.

    NOTE(review): the obfuscation pass had renamed both classes here to `A_`
    (the second shadowing the first) while the module-level script still uses
    `Node`/`MinHeap`; the original names are restored so the script runs.
    """

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap of `Node`s with O(log n) decrease-key.

    `idx_of_element` maps each Node object to its current slot in `heap`;
    `heap_dict` maps node name -> current value (for `__getitem__` lookups).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Floyd heapify: record positions, then sift down from the last parent."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Push array[idx] down until neither child is smaller."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                # Keep the node -> index map in sync with the swap.
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Bubble heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
    print(i)

print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
# NOTE(review): flattened copy of the Funnel Transformer configuration file
# (depends on transformers' `PretrainedConfig`, so documented rather than
# rewritten). The mangling broke it: every `__init__` parameter is named
# `snake_case__` (duplicate argument names are a SyntaxError), the body
# references the undefined name `A__` where `block_repeats`/`kwargs` belonged,
# and the checkpoint map is assigned to the throwaway `__SCREAMING_SNAKE_CASE`.
# Restoring it requires the upstream source file.
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Dict =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple ={ '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class A_ ( __a ): _A :List[Any] = '''funnel''' _A :List[str] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', } def __init__( self : Tuple , snake_case__ : Dict=3_05_22 , snake_case__ : int=[4, 4, 4] , snake_case__ : Tuple=None , snake_case__ : List[str]=2 , snake_case__ : str=7_68 , snake_case__ : Union[str, Any]=12 , snake_case__ : Any=64 , snake_case__ : List[Any]=30_72 , snake_case__ : List[Any]="gelu_new" , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Any=0.0 , snake_case__ : int=0.1 , snake_case__ : Optional[int]=None , snake_case__ : 
Dict=1E-9 , snake_case__ : Optional[Any]="mean" , snake_case__ : Tuple="relative_shift" , snake_case__ : str=True , snake_case__ : int=True , snake_case__ : Dict=True , **snake_case__ : Optional[Any] , ): lowercase = vocab_size lowercase = block_sizes lowercase = [1] * len(A__ ) if block_repeats is None else block_repeats assert len(A__ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." lowercase = num_decoder_layers lowercase = d_model lowercase = n_head lowercase = d_head lowercase = d_inner lowercase = hidden_act lowercase = hidden_dropout lowercase = attention_dropout lowercase = activation_dropout lowercase = initializer_range lowercase = initializer_std lowercase = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" lowercase = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" lowercase = attention_type lowercase = separate_cls lowercase = truncate_seq lowercase = pool_q_only super().__init__(**A__ ) @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return sum(self.block_sizes ) @num_hidden_layers.setter def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : List[str] ): raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" ) @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): return len(self.block_sizes ) @num_blocks.setter def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : List[Any] ): raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
700
# NOTE(review): flattened, identifier-mangled copy of transformers'
# `modeling_poolformer.py` (PoolFormer vision backbone + classification head).
# It depends on torch and transformers internals, so it is documented rather
# than rewritten. The mangling rebinds every local/attribute assignment to
# `lowercase`, so e.g. `PoolFormerEmbeddings.forward` returns the undefined
# name `embeddings`, classes lose their submodules, and the `@property` /
# docstring decorators reference names (`__a`, `_CHECKPOINT_FOR_DOC` targets)
# that no longer line up. Restoring it requires the upstream source file.
# Structure, in order: drop_path helper + DropPath module, patch embeddings,
# group norm wrapper, pooling token mixer, MLP output block, PoolFormer layer
# (with optional layer scale), encoder, PreTrainedModel base, bare model, and
# the image-classification head with regression/single/multi-label loss cases.
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig''' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat''' __SCREAMING_SNAKE_CASE : Tuple =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ): if drop_prob == 0.0 or not training: return input lowercase = 1 - drop_prob lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize lowercase = input.div(lowerCAmelCase__ ) * random_tensor return output class A_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ): super().__init__() lowercase = drop_prob def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ): return drop_path(snake_case__ , self.drop_prob , self.training ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return "p={}".format(self.drop_prob ) class A_ ( nn.Module ): def 
__init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ): super().__init__() lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = self.projection(snake_case__ ) lowercase = self.norm(snake_case__ ) return embeddings class A_ ( nn.GroupNorm ): def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ): super().__init__(1 , snake_case__ , **snake_case__ ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any ): super().__init__() lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): return self.pool(snake_case__ ) - hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ): super().__init__() lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase = ACTaFN[config.hidden_act] else: lowercase = config.hidden_act def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ): lowercase = self.conva(snake_case__ ) lowercase = self.act_fn(snake_case__ ) lowercase = self.drop(snake_case__ ) 
lowercase = self.conva(snake_case__ ) lowercase = self.drop(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ): super().__init__() lowercase = PoolFormerPooling(snake_case__ ) lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase = config.use_layer_scale if config.use_layer_scale: lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): if self.use_layer_scale: lowercase = self.pooling(self.before_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = () lowercase = self.output(self.after_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = (output,) + outputs return outputs else: lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase = pooling_output + hidden_states lowercase = () # Second residual connection inside the PoolFormerOutput block lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase = hidden_states + layer_output lowercase = (output,) + outputs return outputs class A_ ( nn.Module ): def 
__init__( self : List[str] , snake_case__ : Optional[Any] ): super().__init__() lowercase = config # stochastic depth decay rule lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase = nn.ModuleList(snake_case__ ) # Transformer blocks lowercase = [] lowercase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) lowercase = nn.ModuleList(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ): lowercase = () if output_hidden_states else None lowercase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase , lowercase = layers # Get patch embeddings from hidden_states lowercase = embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase = blk(snake_case__ ) lowercase = layer_outputs[0] if output_hidden_states: lowercase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class A_ ( __a ): _A :Any = 
PoolFormerConfig _A :int = '''poolformer''' _A :Union[str, Any] = '''pixel_values''' _A :str = True def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Optional[Any] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , ) class A_ ( __a ): def __init__( self : Union[str, Any] , snake_case__ : int ): super().__init__(snake_case__ ) lowercase = config lowercase = PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) lowercase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[int] ): super().__init__() lowercase = nn.Linear(config.hidden_size , config.hidden_size ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.dense(snake_case__ ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __a , ) class A_ ( __a ): def __init__( self : Dict , snake_case__ : Any ): 
super().__init__(snake_case__ ) lowercase = config.num_labels lowercase = PoolFormerModel(snake_case__ ) # Final norm lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = outputs[0] lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase = """single_label_classification""" else: lowercase = """multi_label_classification""" if self.config.problem_type == "regression": lowercase = MSELoss() if self.num_labels == 1: lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase = CrossEntropyLoss() lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase = BCEWithLogitsLoss() lowercase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase = 
(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
72
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


# NOTE(review): the original source had all parameter names collapsed to
# `snake_case__` (duplicate parameter names -> SyntaxError) and every
# attribute assignment bound to the throwaway name `lowercase`, so no config
# attribute was ever set. Names below are reconstructed from the attribute
# reads in this class and from the upstream RagConfig API — confirm against
# callers.
__SCREAMING_SNAKE_CASE : int = R"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(__SCREAMING_SNAKE_CASE)
class A_(PretrainedConfig):
    # `model_type` / `is_composition` are the PretrainedConfig hooks; the
    # obfuscated source assigned both to `_A`, losing them.
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would silently skip this required-argument validation.
        if not ("question_encoder" in kwargs and "generator" in kwargs):
            raise ValueError("Config has to be initialized with question_encoder and generator config")
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto module.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # Fall back to the generator's forced EOS token when none was given.
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs):
        """Instantiate a RAG config from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
701
from numpy import exp, pi, sqrt def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
72
0
import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): # Construct model if openai_config_file == "": lowercase = OpenAIGPTConfig() else: lowercase = OpenAIGPTConfig.from_json_file(lowercase__ ) lowercase = OpenAIGPTModel(lowercase__ ) # Load weights from numpy load_tf_weights_in_openai_gpt(lowercase__ ,lowercase__ ,lowercase__ ) # Save pytorch-model lowercase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME lowercase = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() ,lowercase__ ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowercase__ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--openai_checkpoint_folder_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--openai_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) __SCREAMING_SNAKE_CASE : Tuple =parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
702
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A_ : _A :int _A :int class A_ : def __init__( self : List[str] , snake_case__ : int ): lowercase = [[] for _ in range(snake_case__ )] lowercase = size def __getitem__( self : Optional[int] , snake_case__ : int ): return iter(self._graph[vertex] ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): return self._size def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int ): lowercase = deque([start_vertex] ) lowercase = [None] * self.size lowercase = 0 while queue: lowercase = queue.popleft() lowercase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowercase = current_distance + edge.weight lowercase = distances[edge.destination_vertex] if ( isinstance(snake_case__ , snake_case__ ) and new_distance >= dest_vertex_distance ): continue lowercase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
72
0
import re
import string

import numpy as np

import datasets


# NOTE(review): the obfuscated source assigned all three doc constants to the
# same name (`__SCREAMING_SNAKE_CASE`) while the decorator and `_info` read
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION`; the names, parameter
# names and body bindings below are restored from those reads.
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between
        0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """Exact-match rate between predictions and references, with optional normalization."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip the ignore-regexes first, before any case/punct/number
        # normalization, matching the documented order of operations.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, """""", x) for x in predictions])
                references = np.array([re.sub(s, """""", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""", """""", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("""""", """""", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        # Mean of booleans * 100 -> percentage of exact matches.
        return {"exact_match": np.mean(score_list) * 1_00}
703
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated source assigned the logger and this map to the
# same name, shadowing the logger; names restored.
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class A_(PretrainedConfig):
    """Configuration for Data2VecAudio models.

    NOTE(review): every parameter was collapsed to ``snake_case__`` (duplicate
    parameter names -> SyntaxError) and every ``self.*`` assignment bound to
    ``lowercase``; parameter names are reconstructed from the attribute reads
    in the body and the upstream Data2VecAudioConfig API — confirm against
    pretrained configs.
    """

    model_type = """data2vec-audio"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_activation="gelu",
        conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_56,
        tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=5_12,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs to lists so the stored config is mutable/JSON-friendly.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return math.prod(self.conv_stride)
72
0
def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = 1 lowercase = 2 while i * i <= n: lowercase = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def UpperCamelCase__ ( ): lowercase = 1 lowercase = 1 while True: i += 1 t_num += i if count_divisors(SCREAMING_SNAKE_CASE__ ) > 500: break return t_num if __name__ == "__main__": print(solution())
704
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys (in place) that have no PyTorch counterpart."""
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an untied lm_head Linear initialized from the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and return an equivalent XGLMForCausalLM.

    NOTE(review): function/local names restored from the main guard and the
    body's reads; the obfuscated source defined every function as
    ``UpperCamelCase__`` and bound every local to ``lowercase``.
    """
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    args = Namespace(**checkpoint["""cfg"""]["""model"""])
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]

    # fairseq prefixes decoder weights with "decoder."; HF expects "model.".
    state_dict = {key.replace("""decoder""", """model"""): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="""gelu""",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the lm_head is rebuilt below from the embeddings.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
72
0
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class A_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = torch.nn.Linear(10 , 10 ) lowercase = torch.optim.SGD(model.parameters() , 0.1 ) lowercase = Accelerator() lowercase = accelerator.prepare(__snake_case ) try: pickle.loads(pickle.dumps(__snake_case ) ) except Exception as e: self.fail(F"""Accelerated optimizer pickling failed with {e}""" ) AcceleratorState._reset_state()
705
from __future__ import annotations import bisect def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_right(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left <= right: lowercase = left + (right - left) // 2 lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase = midpoint - 1 else: lowercase = midpoint + 1 return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = bisect.bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ) if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if right < left: return None lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ 
,lowerCAmelCase__ ,midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint + 1 ,lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =input('''Enter numbers separated by comma:\n''').strip() __SCREAMING_SNAKE_CASE : Tuple =sorted(int(item) for item in user_input.split(''',''')) __SCREAMING_SNAKE_CASE : Tuple =int(input('''Enter a single number to be found in the list:\n''')) __SCREAMING_SNAKE_CASE : Union[str, Any] =binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
72
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Optional[Any] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] ={ """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""", # See all Cvt models at https://huggingface.co/models?filter=cvt } class A_ ( __a ): _A :Tuple = "cvt" def __init__( self : Optional[Any] , snake_case__ : Optional[Any]=3 , snake_case__ : int=[7, 3, 3] , snake_case__ : Optional[Any]=[4, 2, 2] , snake_case__ : int=[2, 1, 1] , snake_case__ : str=[64, 1_92, 3_84] , snake_case__ : Union[str, Any]=[1, 3, 6] , snake_case__ : Dict=[1, 2, 10] , snake_case__ : List[str]=[4.0, 4.0, 4.0] , snake_case__ : int=[0.0, 0.0, 0.0] , snake_case__ : Optional[Any]=[0.0, 0.0, 0.0] , snake_case__ : Any=[0.0, 0.0, 0.1] , snake_case__ : int=[True, True, True] , snake_case__ : Any=[False, False, True] , snake_case__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , snake_case__ : int=[3, 3, 3] , snake_case__ : List[Any]=[1, 1, 1] , snake_case__ : List[Any]=[2, 2, 2] , snake_case__ : Any=[1, 1, 1] , snake_case__ : List[str]=[1, 1, 1] , snake_case__ : Optional[int]=0.02 , snake_case__ : Optional[int]=1E-12 , **snake_case__ : str , ): super().__init__(**snake_case__ ) lowercase = num_channels lowercase = patch_sizes lowercase = patch_stride lowercase = patch_padding lowercase = embed_dim lowercase = num_heads lowercase = depth lowercase = mlp_ratio lowercase = attention_drop_rate lowercase = drop_rate lowercase = drop_path_rate lowercase = qkv_bias lowercase = cls_token lowercase = qkv_projection_method lowercase = kernel_qkv lowercase = padding_kv lowercase = stride_kv lowercase = padding_q lowercase = stride_q lowercase = initializer_range lowercase = layer_norm_eps
706
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
#
# NOTE(review): this module is heavily mangled by an obfuscation pass —
# duplicate parameter names (`lowerCAmelCase__` repeated per signature) and
# assignment targets replaced with the throwaway name `lowercase` — so as
# written it neither parses nor restores any of the state it intends to.
# The sandboxing logic (which host functions get disabled, and in what
# order they are saved/restored) cannot be reconstructed from this view
# with confidence, so the code is left byte-identical and only annotated.

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


# check_correctness: run a candidate program in a subprocess with a timeout
# and report pass/fail. NOTE(review): the four parameters were presumably
# (check_program, timeout, task_id, completion_id) judging by the body's
# reads — confirm against the upstream code_eval metric.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    # A manager-backed list lets the child process report its result back.
    lowercase = multiprocessing.Manager()
    lowercase = manager.list()
    lowercase = multiprocessing.Process(target=lowerCAmelCase__ ,args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        # Child exceeded the grace period: kill it rather than hang.
        p.kill()
    if not result:
        result.append("""timed out""" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


# unsafe_execute: exec() the untrusted program inside a temp dir with most
# destructive host functionality disabled by reliability_guard().
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        # NOTE(review): originally saved shutil.rmtree / os.rmdir / os.chdir
        # before reliability_guard() nulls them out; the mangled targets
        # (`lowercase`) drop the saved references.
        lowercase = shutil.rmtree
        lowercase = os.rmdir
        lowercase = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            lowercase = {}
            with swallow_io():
                with time_limit(lowerCAmelCase__ ):
                    exec(lowerCAmelCase__ ,lowerCAmelCase__ )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            # Deliberately broad: any failure of untrusted code is a "failed" verdict.
            result.append(f"""failed: {e}""" )

        # Needed for cleaning up.
        # NOTE(review): originally restored shutil.rmtree / os.rmdir / os.chdir
        # from the references saved above.
        lowercase = rmtree
        lowercase = rmdir
        lowercase = chdir


# time_limit: raise TimeoutException after `seconds` via SIGALRM.
@contextlib.contextmanager
def UpperCamelCase__ ( lowerCAmelCase__ ):
    def signal_handler(lowerCAmelCase__ ,lowerCAmelCase__ ):
        raise TimeoutException("""Timed out!""" )

    signal.setitimer(signal.ITIMER_REAL ,lowerCAmelCase__ )
    signal.signal(signal.SIGALRM ,lowerCAmelCase__ )
    try:
        yield
    finally:
        # Always cancel the timer so it cannot fire outside the context.
        signal.setitimer(signal.ITIMER_REAL ,0 )


# swallow_io: silence stdout/stderr and block stdin for the untrusted code.
@contextlib.contextmanager
def UpperCamelCase__ ( ):
    lowercase = WriteOnlyStringIO()
    with contextlib.redirect_stdout(lowerCAmelCase__ ):
        with contextlib.redirect_stderr(lowerCAmelCase__ ):
            with redirect_stdin(lowerCAmelCase__ ):
                yield


# create_tempdir: run the body chdir'd into a fresh temporary directory.
@contextlib.contextmanager
def UpperCamelCase__ ( ):
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(lowerCAmelCase__ ):
            yield dirname


# TimeoutException — NOTE(review): the base `__a` is undefined here;
# presumably `Exception` originally.
class A_ ( __a ):
    pass


# WriteOnlyStringIO: a StringIO whose read operations always fail, so the
# sandboxed program cannot read back what it printed (or read "stdin").
class A_ ( io.StringIO ):
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : int , **snake_case__ : int ):
        raise OSError

    def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : int ):
        raise OSError

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
        raise OSError

    # NOTE(review): originally read/readline/readlines/readable; all four
    # were renamed to the same identifier, so only the last survives.
    def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : Any ):
        return False


# redirect_stdin: like redirect_stdout but for sys.stdin. NOTE(review): the
# class attribute was originally `_stream = "stdin"` (the hook
# contextlib._RedirectStream reads).
class A_ ( contextlib._RedirectStream ):  # type: ignore
    _A :List[Any] = '''stdin'''


# chdir: temporarily change the working directory ("." is a no-op).
@contextlib.contextmanager
def UpperCamelCase__ ( lowerCAmelCase__ ):
    if root == ".":
        yield
        return
    lowercase = os.getcwd()
    os.chdir(lowerCAmelCase__ )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(lowerCAmelCase__ )


# reliability_guard: disable host functions that untrusted code could use
# to damage the machine (process control, file deletion, subprocesses, ...)
# and optionally cap memory. It does NOT make execution safe — only less
# destructive. NOTE(review): every `lowercase = None` below originally
# nulled a specific attribute (e.g. builtins.exit, os.kill, os.system,
# shutil.rmtree, subprocess.Popen, sys.modules entries); the exact targets
# are unrecoverable from this view.
def UpperCamelCase__ ( lowerCAmelCase__=None ):
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            # RLIMIT_STACK is not settable this way on macOS.
            resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes) )

    faulthandler.disable()

    import builtins

    lowercase = None
    lowercase = None

    import os

    lowercase = """1"""

    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None

    import shutil

    lowercase = None
    lowercase = None
    lowercase = None

    import subprocess

    lowercase = None  # type: ignore
    lowercase = None

    import sys

    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
72
0
# Fine-tune or train-from-scratch a language model (causal, masked, whole-word-mask,
# or permutation LM) on text files. Structure: two argument dataclasses, a dataset
# factory, and a `main` driving HfArgumentParser -> model/tokenizer load -> Trainer.
# NOTE(review): the obfuscation pass renamed both dataclasses to `A_`, all fields to
# `_A`, and locals to `lowercase`, while later code still references the original
# names (ModelArguments, DataTrainingArguments, parser, data_args, training_args,
# model, config, tokenizer, results, main, ...). The module will not run as-is —
# every such reference needs to be reconciled with the original source.
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

__SCREAMING_SNAKE_CASE : str =logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] =list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__SCREAMING_SNAKE_CASE : str =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class A_ :
    # Model-selection arguments (checkpoint, model type, config/tokenizer overrides,
    # cache directory). Field names were obfuscated to `_A`; the `help` strings
    # below still identify each field's intent.
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
                ''' scratch.'''
            )
        } , )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_UpperCamelCase )} , )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )


@dataclass
class A_ :
    # Data arguments: train/eval files, Chinese whole-word-mask reference files,
    # masking hyper-parameters, block size, cache behaviour.
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={
            '''help''': (
                '''The input training data files (multiple files in glob format). '''
                '''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
            )
        } , )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
    _A :Optional[str] = field(
        default=_UpperCamelCase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
    _A :bool = field(
        default=_UpperCamelCase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
    _A :bool = field(
        default=_UpperCamelCase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
    _A :bool = field(default=_UpperCamelCase , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
    _A :float = field(
        default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    _A :float = field(
        default=1 / 6 , metadata={
            '''help''': (
                '''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
                ''' modeling.'''
            )
        } , )
    _A :int = field(
        default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
    _A :int = field(
        default=-1 , metadata={
            '''help''': (
                '''Optional input sequence length after tokenization.'''
                '''The training dataset will be truncated in block of this size for training.'''
                '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
            )
        } , )
    _A :bool = field(
        default=_UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,):
    # Dataset factory: returns a line-by-line, whole-word-mask-with-ref, or
    # block-concatenated TextDataset depending on the parsed data arguments.
    # NOTE(review): `args`, `ref_path`, `evaluate`, `f` and the `lowercase_`
    # call arguments are unresolved obfuscation residue — verify against original.
    def _dataset(lowerCAmelCase__ ,lowerCAmelCase__=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
                return LineByLineWithRefDataset(
                    tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,ref_path=lowercase_ ,)
            return LineByLineTextDataset(tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=lowercase_ ,)

    if evaluate:
        return _dataset(args.eval_data_file ,args.eval_ref_file )
    elif args.train_data_files:
        # Multiple glob-matched training files are concatenated into one dataset.
        return ConcatDataset([_dataset(lowercase_ ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file ,args.train_ref_file )


def UpperCamelCase__ ( ):
    # Entry point: parse args, validate, configure logging, build model/tokenizer,
    # pick the right data collator for the model type, then train and/or evaluate.
    lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowercase = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
            """or remove the --do_eval argument.""" )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            """ --overwrite_output_dir to overcome.""" )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" ,lowercase_ )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        lowercase = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
    else:
        lowercase = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )

    if model_args.tokenizer_name:
        lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it,and load it from here, using --tokenizer_name""" )

    if model_args.model_name_or_path:
        lowercase = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase_ ,cache_dir=model_args.cache_dir ,)
    else:
        logger.info("""Training new model from scratch""" )
        lowercase = AutoModelWithLMHead.from_config(lowercase_ )

    model.resize_token_embeddings(len(lowercase_ ) )

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
            """--mlm flag (masked language modeling).""" )

    if data_args.block_size <= 0:
        lowercase = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        lowercase = min(data_args.block_size ,tokenizer.max_len )

    # Get datasets
    lowercase = (
        get_dataset(lowercase_ ,tokenizer=lowercase_ ,cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    lowercase = (
        get_dataset(lowercase_ ,tokenizer=lowercase_ ,evaluate=lowercase_ ,cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Collator choice: XLNet -> permutation LM; otherwise MLM with optional
    # whole-word masking, or plain (causal) LM collation.
    if config.model_type == "xlnet":
        lowercase = DataCollatorForPermutationLanguageModeling(
            tokenizer=lowercase_ ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,)
    else:
        if data_args.mlm and data_args.whole_word_mask:
            lowercase = DataCollatorForWholeWordMask(
                tokenizer=lowercase_ ,mlm_probability=data_args.mlm_probability )
        else:
            lowercase = DataCollatorForLanguageModeling(
                tokenizer=lowercase_ ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    lowercase = Trainer(
        model=lowercase_ ,args=lowercase_ ,data_collator=lowercase_ ,train_dataset=lowercase_ ,eval_dataset=lowercase_ ,prediction_loss_only=lowercase_ ,)

    # Training
    if training_args.do_train:
        lowercase = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=lowercase_ )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    lowercase = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )

        lowercase = trainer.evaluate()

        # Perplexity is exp of the mean eval cross-entropy loss.
        lowercase = math.exp(eval_output["""eval_loss"""] )
        lowercase = {"perplexity": perplexity}

        lowercase = os.path.join(training_args.output_dir ,"""eval_results_lm.txt""" )
        if trainer.is_world_master():
            with open(lowercase_ ,"""w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key in sorted(result.keys() ):
                    logger.info("""  %s = %s""" ,lowercase_ ,str(result[key] ) )
                    writer.write("""%s = %s\n""" % (key, str(result[key] )) )

        results.update(lowercase_ )

    return results


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # xla_spawn / TPU entry point: each spawned process just runs main().
    # NOTE(review): `main` is unresolved here — the driver above was renamed
    # to `UpperCamelCase__` by the obfuscation; verify against original.
    main()


if __name__ == "__main__":
    main()
707
# Processor wrapping a BLIP image processor and an auto-loaded tokenizer into a
# single callable that prepares model inputs from images and/or text.
# NOTE(review): class/attribute names were obfuscated (`A_`, `_A`, `__a`); the
# attribute-class strings identify them as attributes = [image_processor,
# tokenizer], image_processor_class = BlipImageProcessor, tokenizer_class =
# AutoTokenizer. Verify against the original source before refactoring.
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A_ ( __a ):
    _A :Optional[int] = ['''image_processor''', '''tokenizer''']
    _A :Tuple = '''BlipImageProcessor'''
    _A :List[Any] = '''AutoTokenizer'''

    def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Dict ):
        # Takes (image_processor, tokenizer); also keeps a direct handle on the
        # image processor for the current-processor convention.
        lowercase = False
        super().__init__(snake_case__ , snake_case__ )
        lowercase = self.image_processor

    def __call__( self : List[str] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ):
        # Tokenize text (if any) and/or run the image processor; when both are
        # given, the text encoding is merged into the image-processor encoding.
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""" )

        # Get only text
        if images is None:
            lowercase = self.tokenizer
            lowercase = self.tokenizer(
                text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
            return text_encoding

        # add pixel_values
        lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ )

        if text is not None:
            lowercase = self.tokenizer(
                text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
        else:
            lowercase = None

        if text_encoding is not None:
            encoding_image_processor.update(snake_case__ )

        return encoding_image_processor

    def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : List[str] ):
        # Forwards to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : int , **snake_case__ : int ):
        # Forwards to the tokenizer's decode.
        return self.tokenizer.decode(*snake_case__ , **snake_case__ )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        lowercase = self.tokenizer.model_input_names
        lowercase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
72
0
def UpperCamelCase__ ( lowerCAmelCase__ ):
    """Return the ``lowerCAmelCase__``-bit reflected binary Gray code as integers.

    Fixes in this revision:
    - The original defined BOTH functions under the name ``UpperCamelCase__``
      (the second shadowed the first) while calling ``gray_code_sequence_string``,
      which was never bound -> NameError at runtime. The string helper now
      carries the name the code actually calls.
    - Bodies referenced names that did not match their parameters
      (``bit_count``, ``sequence``, ``__UpperCamelCase``) -> NameError. All
      references now resolve.

    :param lowerCAmelCase__: number of bits; must be >= 0.
    :return: list of ints, each adjacent pair differing in exactly one bit.
    :raises ValueError: if ``lowerCAmelCase__`` is negative.
    """
    if lowerCAmelCase__ < 0:
        raise ValueError("""The given input must be positive""" )

    # get the generated string sequence
    sequence = gray_code_sequence_string(lowerCAmelCase__ )
    # convert the binary strings to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence


def gray_code_sequence_string( bit_count ):
    """Return the ``bit_count``-bit Gray code as a list of binary strings.

    Standard reflect-and-prefix construction: take the (n-1)-bit sequence,
    prefix '0' over it in order, then prefix '1' over it in reverse.
    """
    # base cases
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # 1 << n is equivalent to 2^n

    # recursive answer generates the sequence for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("""0""" + smaller_sequence[i] )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("""1""" + smaller_sequence[i] )

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
708
# Auto-discovery of feature extractors: maps model types to feature-extractor
# class names and resolves/loads the right class from a checkpoint.
# NOTE(review): module-level constants were all renamed to `__SCREAMING_SNAKE_CASE`
# (each assignment overwriting the last) while the code below still references the
# original names (logger, FEATURE_EXTRACTOR_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING,
# class_name, kwargs, config_dict, ...). The module will not run as-is — verify
# each name against the original source.
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)

# model_type -> feature-extractor class name
__SCREAMING_SNAKE_CASE : Any =OrderedDict(
    [
        ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
        ('''beit''', '''BeitFeatureExtractor'''),
        ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
        ('''clap''', '''ClapFeatureExtractor'''),
        ('''clip''', '''CLIPFeatureExtractor'''),
        ('''clipseg''', '''ViTFeatureExtractor'''),
        ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
        ('''convnext''', '''ConvNextFeatureExtractor'''),
        ('''cvt''', '''ConvNextFeatureExtractor'''),
        ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
        ('''data2vec-vision''', '''BeitFeatureExtractor'''),
        ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
        ('''deit''', '''DeiTFeatureExtractor'''),
        ('''detr''', '''DetrFeatureExtractor'''),
        ('''dinat''', '''ViTFeatureExtractor'''),
        ('''donut-swin''', '''DonutFeatureExtractor'''),
        ('''dpt''', '''DPTFeatureExtractor'''),
        ('''encodec''', '''EncodecFeatureExtractor'''),
        ('''flava''', '''FlavaFeatureExtractor'''),
        ('''glpn''', '''GLPNFeatureExtractor'''),
        ('''groupvit''', '''CLIPFeatureExtractor'''),
        ('''hubert''', '''Wav2Vec2FeatureExtractor'''),
        ('''imagegpt''', '''ImageGPTFeatureExtractor'''),
        ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
        ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
        ('''levit''', '''LevitFeatureExtractor'''),
        ('''maskformer''', '''MaskFormerFeatureExtractor'''),
        ('''mctct''', '''MCTCTFeatureExtractor'''),
        ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
        ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
        ('''mobilevit''', '''MobileViTFeatureExtractor'''),
        ('''nat''', '''ViTFeatureExtractor'''),
        ('''owlvit''', '''OwlViTFeatureExtractor'''),
        ('''perceiver''', '''PerceiverFeatureExtractor'''),
        ('''poolformer''', '''PoolFormerFeatureExtractor'''),
        ('''regnet''', '''ConvNextFeatureExtractor'''),
        ('''resnet''', '''ConvNextFeatureExtractor'''),
        ('''segformer''', '''SegformerFeatureExtractor'''),
        ('''sew''', '''Wav2Vec2FeatureExtractor'''),
        ('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
        ('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
        ('''speecht5''', '''SpeechT5FeatureExtractor'''),
        ('''swiftformer''', '''ViTFeatureExtractor'''),
        ('''swin''', '''ViTFeatureExtractor'''),
        ('''swinv2''', '''ViTFeatureExtractor'''),
        ('''table-transformer''', '''DetrFeatureExtractor'''),
        ('''timesformer''', '''VideoMAEFeatureExtractor'''),
        ('''tvlt''', '''TvltFeatureExtractor'''),
        ('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
        ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
        ('''van''', '''ConvNextFeatureExtractor'''),
        ('''videomae''', '''VideoMAEFeatureExtractor'''),
        ('''vilt''', '''ViltFeatureExtractor'''),
        ('''vit''', '''ViTFeatureExtractor'''),
        ('''vit_mae''', '''ViTFeatureExtractor'''),
        ('''vit_msn''', '''ViTFeatureExtractor'''),
        ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
        ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
        ('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
        ('''whisper''', '''WhisperFeatureExtractor'''),
        ('''xclip''', '''CLIPFeatureExtractor'''),
        ('''yolos''', '''YolosFeatureExtractor'''),
    ]
)

__SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # Resolve a feature-extractor class from its name: first by scanning the
    # per-model modules, then registered extra content, finally the top-level
    # `transformers` namespace (to surface a proper dummy-import error).
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            lowercase = model_type_to_module_name(lowerCAmelCase__ )

            lowercase = importlib.import_module(f""".{module_name}""" ,"""transformers.models""" )
            try:
                return getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(lowerCAmelCase__ ,"""__name__""" ,lowerCAmelCase__ ) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    lowercase = importlib.import_module("""transformers""" )
    if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ):
        return getattr(lowerCAmelCase__ ,lowerCAmelCase__ )

    return None


def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,**lowerCAmelCase__ ,):
    # Load the feature-extractor configuration JSON from a repo/path; returns {}
    # (caller falls back to the model config) when the file cannot be located.
    lowercase = get_file_from_repo(
        lowerCAmelCase__ ,lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,use_auth_token=lowerCAmelCase__ ,revision=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
    if resolved_config_file is None:
        logger.info(
            """Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
        return {}

    with open(lowerCAmelCase__ ,encoding="""utf-8""" ) as reader:
        return json.load(lowerCAmelCase__ )


class A_ :
    # AutoFeatureExtractor facade: not instantiable directly; use from_pretrained.
    def __init__( self : List[Any] ):
        raise EnvironmentError(
            """AutoFeatureExtractor is designed to be instantiated """
            """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )

    @classmethod
    @replace_list_option_in_docstrings(snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( cls : Dict , snake_case__ : Tuple , **snake_case__ : int ):
        # Resolution order: feature-extractor config -> model config -> remote
        # (trust_remote_code) dynamic class -> FEATURE_EXTRACTOR_MAPPING fallback.
        lowercase = kwargs.pop("""config""" , snake_case__ )
        lowercase = kwargs.pop("""trust_remote_code""" , snake_case__ )
        lowercase = True

        lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ )
        lowercase = config_dict.get("""feature_extractor_type""" , snake_case__ )
        lowercase = None
        if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
            lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(snake_case__ , snake_case__ ):
                lowercase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
            # It could be in `config.feature_extractor_type``
            lowercase = getattr(snake_case__ , """feature_extractor_type""" , snake_case__ )
            if hasattr(snake_case__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
                lowercase = config.auto_map["""AutoFeatureExtractor"""]

        if feature_extractor_class is not None:
            lowercase = feature_extractor_class_from_name(snake_case__ )

        lowercase = feature_extractor_auto_map is not None
        lowercase = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING
        lowercase = resolve_trust_remote_code(
            snake_case__ , snake_case__ , snake_case__ , snake_case__ )

        if has_remote_code and trust_remote_code:
            lowercase = get_class_from_dynamic_module(
                snake_case__ , snake_case__ , **snake_case__ )
            lowercase = kwargs.pop("""code_revision""" , snake_case__ )
            if os.path.isdir(snake_case__ ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING:
            lowercase = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )]
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )

        raise ValueError(
            F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( snake_case__ : Optional[int] , snake_case__ : List[str] ):
        # Register a new (config class, feature-extractor class) pair.
        FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
72
0
# Test suite for the StableUnCLIP image-to-image pipeline: a fast dummy-component
# suite plus slow GPU integration tests.
# NOTE(review): the obfuscation replaced mixin base names, local names and call
# arguments with `_lowerCAmelCase` / `lowercase`, so many references below
# (sd_pipe, inputs, image, pipe, input_image, generator, components, ...) are
# unresolved as written — verify against the original diffusers test file.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    _A = StableUnCLIPImgaImgPipeline
    _A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _A = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _A = frozenset([] )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        # Build a full set of tiny dummy pipeline components (seeded for
        # determinism) keyed by pipeline constructor argument name.
        lowercase = 32
        lowercase = embedder_hidden_size

        # image encoding components
        lowercase = CLIPImageProcessor(crop_size=32 , size=32 )

        torch.manual_seed(0 )
        lowercase = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=_lowerCAmelCase , projection_dim=_lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )

        # regular denoising components
        torch.manual_seed(0 )
        lowercase = StableUnCLIPImageNormalizer(embedding_dim=_lowerCAmelCase )
        lowercase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )

        torch.manual_seed(0 )
        lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )

        torch.manual_seed(0 )
        lowercase = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )

        torch.manual_seed(0 )
        lowercase = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCAmelCase , layers_per_block=1 , upcast_attention=_lowerCAmelCase , use_linear_projection=_lowerCAmelCase , )

        torch.manual_seed(0 )
        lowercase = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )

        torch.manual_seed(0 )
        lowercase = AutoencoderKL()

        lowercase = {
            # image encoding components
            """feature_extractor""": feature_extractor,
            """image_encoder""": image_encoder.eval(),
            # image noising components
            """image_normalizer""": image_normalizer.eval(),
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder.eval(),
            """unet""": unet.eval(),
            """scheduler""": scheduler,
            """vae""": vae.eval(),
        }

        return components

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any]=0 , snake_case__ : Optional[int]=True ):
        # Build deterministic dummy call kwargs; optionally convert the random
        # tensor into a PIL image (scale to [0,1], NHWC, numpy_to_pil).
        if str(_lowerCAmelCase ).startswith("""mps""" ):
            lowercase = torch.manual_seed(_lowerCAmelCase )
        else:
            lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )

        lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )

        if pil_image:
            lowercase = input_image * 0.5 + 0.5
            lowercase = input_image.clamp(0 , 1 )
            lowercase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowercase = DiffusionPipeline.numpy_to_pil(_lowerCAmelCase )[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # With image_embeds=None the pipeline must compute embeds itself and
        # reproduce the pinned 3x3 output slice on CPU.
        lowercase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowercase = self.get_dummy_components()
        lowercase = StableUnCLIPImgaImgPipeline(**_lowerCAmelCase )
        lowercase = sd_pipe.to(_lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )

        lowercase = self.get_dummy_inputs(_lowerCAmelCase )
        inputs.update({"""image_embeds""": None} )
        lowercase = sd_pipe(**_lowerCAmelCase ).images
        lowercase = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        lowercase = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # Attention slicing: exact comparison only where determinism holds.
        lowercase = torch_device in ["""cpu""", """mps"""]
        self._test_attention_slicing_forward_pass(test_max_difference=_lowerCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        # Batch-of-one equivalence: exact comparison only where determinism holds.
        lowercase = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=_lowerCAmelCase )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCAmelCase )


@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    # Slow GPU integration tests against published checkpoints and reference outputs.
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # 2-1-l img2img checkpoint vs. stored fp16 reference image.
        lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        lowercase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )

        lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase = pipe(_lowerCAmelCase , """anime turle""" , generator=_lowerCAmelCase , output_type="""np""" )

        lowercase = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # 2-1-h img2img checkpoint vs. stored fp16 reference image.
        lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        lowercase = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )

        lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowercase = pipe(_lowerCAmelCase , """anime turle""" , generator=_lowerCAmelCase , output_type="""np""" )

        lowercase = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        # Memory budget: offloaded fp16 pipeline must stay under 7 GB peak.
        lowercase = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        lowercase = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        lowercase = pipe.to(_lowerCAmelCase )
        pipe.set_progress_bar_config(disable=_lowerCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowercase = pipe(
            _lowerCAmelCase , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )

        lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
709
"""Convert a fairseq SpeechT5 checkpoint into the Transformers format.

The obfuscated original was not runnable: every function declared the same
parameter name multiple times (a SyntaxError), all module-level mapping
constants shadowed a single name while the code referenced the real
``MAPPING_*`` / ``IGNORE_KEYS_*`` names, and the ``__main__`` block referenced
an undefined ``parser``. This version restores consistent, meaningful names.
"""
import argparse

import torch

from transformers import (
    SpeechTaConfig,
    SpeechTaFeatureExtractor,
    SpeechTaForSpeechToSpeech,
    SpeechTaForSpeechToText,
    SpeechTaForTextToSpeech,
    SpeechTaProcessor,
    SpeechTaTokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq name -> Transformers name, grouped per sub-module.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task merged mappings.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_model, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_model` addressed by dotted `key`.

    `weight_type` selects which tensor of the target module is written
    (weight / weight_g / weight_v / bias / running stats), or the module's own
    `.data` when None. Raises ValueError on shape mismatch.
    """
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True if fairseq weight `name` matches any (possibly wildcarded) ignore pattern."""
    for key in ignore_keys:
        if key.endswith(".*"):
            # Prefix wildcard: "foo.*" matches anything starting with "foo."
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            # Infix wildcard: both sides must appear in the name.
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    """Load every tensor from the fairseq state dict into `hf_model` for the given task."""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-encoder conv/layer-norm tensor, or record it as unused."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Build the task-specific SpeechT5 model, load fairseq weights, and save/push it."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1_876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1_876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
    # NOTE(review): if vocab_path is falsy, `tokenizer` is unbound below — the
    # script effectively requires --vocab_path; confirm intended behavior.

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
72
0
"""Train a masked language model on TPU with tf.data TFRecord pipelines.

The obfuscated original was not runnable: functions declared duplicate
parameter names (a SyntaxError), referenced the undefined name
``snake_case_`` where argparse types / local variables belonged, read
``args.bfloataa`` instead of ``args.bfloat16``, and bound the
``(optimizer, schedule)`` tuple from ``create_optimizer`` to a single name.
This version restores consistent names and fixes ``prepare_dataset`` to use
its own ``shuffle_buffer_size`` parameter instead of the global ``args``.
"""
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    """Parse all command-line options for the TPU MLM training run."""
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    """Resolve and initialize the TPU cluster described by the CLI arguments."""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    """Sum the per-shard sample counts encoded in TFRecord file names (-NNN-<count>.tfrecord)."""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched, masked tf.data pipeline from a list of TFRecord shard paths."""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        # Shuffle shard order each epoch.
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    """Set up the strategy, model, datasets and run the training loop."""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # The model's vocab size must match the tokenizer we train with.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
710
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE : List[Any] ='''.''' if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') __SCREAMING_SNAKE_CASE : Dict =[] __SCREAMING_SNAKE_CASE : Dict =[] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE : Optional[Any] =line.strip() __SCREAMING_SNAKE_CASE : Tuple =os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE : Optional[Any] ='''\n'''.join(non_existent_paths) raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
72
0
"""Short, reversible run-name generation from hyperparameter dictionaries.

The obfuscated original was not runnable: methods declared duplicate
parameter names (a SyntaxError), bodies referenced the undefined name
``UpperCamelCase__``, all three class attributes shadowed ``_A`` while the
methods read ``cls.PREFIX`` / ``cls.DEFAULTS`` / ``cls.NAMING_INFO``, and all
methods shared a single name so earlier definitions were shadowed. This
version restores consistent names and also fixes the "paranoid fallback"
loop, which never incremented its counter (infinite loop on collision).
"""
import copy
import re


class A_:
    """Build short unique names ("hp_lr0.25") for hyperparameter dicts and parse them back."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Set the name prefix and the default hyperparameter values, then build the naming tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and memoize in `info`) the shortest unused prefix of `word`."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            short_word = prefix
            break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, so append "#<digits-as-letters>".
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # fixed: original never advanced, looping forever on collision
                    continue
                short_word = sword
                break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return a short name for `param_name` by shortening each underscore-separated word."""
        words = param_name.split("_")
        shortname_parts = [A_.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register `param_name` and its short name in both lookup directions."""
        short_name = A_.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the short-name lookup tables once; later calls are no-ops."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode `params` as "<PREFIX>_<shortkey><value>...", omitting defaults."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by `shortname` back into a full parameter dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                # Numeric values are concatenated: split digits/dots from letters.
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
711
"""Lazy-import module definition for the ResNet model family.

The obfuscated original never inserted the framework-specific export lists
into ``_import_structure`` (each was assigned to the shadowing name
``__SCREAMING_SNAKE_CASE``) and referenced the undefined ``_import_structure``
when constructing the lazy module, which was also never registered in
``sys.modules``. Restored to the standard lazy-module pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Always-available (config-only) exports.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
72
0
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline.

    Scores an image against an arbitrary list of candidate labels by formatting
    each label with a hypothesis template and comparing text/image logits.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Classify the image(s) against `candidate_labels` given in kwargs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split user kwargs into (preprocess, forward, postprocess) dicts.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        # Wrapped in a list so batching does not split the tokenized dict apart.
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                # A single label squeezes down to a scalar.
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Highest score first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
712
import argparse
import os
import re


PATH_TO_EXAMPLES = "examples/"

# Maps a pattern name to (compiled regex matching the versioned line,
# replacement template in which "VERSION" is substituted by the target version).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version line is rewritten on every release, keyed by pattern name.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` call in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (examples are skipped for patches)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point model-doc links in the README architecture list at stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__` as a `packaging` Version."""
    # Imported lazily so the pure file-rewriting helpers above stay usable
    # even when `packaging` is not installed.
    import packaging.version

    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and update it across the repository."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump the repository to the next `.dev0` version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
72
0
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv parameters grouped by compatibility concern (see
# `CsvConfig.pd_read_csv_kwargs` for how each group is filtered out).
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV; fields mirror the `pandas.read_csv` keyword arguments."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter` / `column_names` are backward-compatible aliases for
        # pandas' `sep` / `names`.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Build the kwargs dict to forward to `pandas.read_csv`, filtered for
        the installed pandas version."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams CSV files through pandas in chunks."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict `data_files`; one split per dict key,
        otherwise a single TRAIN split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
713
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration wrapping a text and a vision sub-configuration."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep the sub-configs' initializer range in sync with the composite one.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        """Instantiate a composite config from already-built sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
72
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    """Image processor: optionally resize, center-crop, rescale and normalize
    images and return them as a `pixel_values` batch."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to either an exact (height, width) or so the shortest edge
        matches `size["shortest_edge"]`."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transforms; per-call arguments override the
        instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
714
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that gradients of ``model_a`` and ``model_b`` are (not) in sync.

    When ``did_step`` is False the two models are expected to have diverging
    gradients (sync was skipped); when True their gradients must match.
    Parameters with ``requires_grad=False`` are ignored.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step on ``model`` with an MSE loss.

    With ``do_backward=False`` the loss is scaled by the accumulation steps
    manually and plain ``loss.backward()`` is used (the "ground truth" path);
    otherwise ``accelerator.backward`` performs the (possibly accumulated)
    backward pass.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Build a reference model plus an accelerator-prepared copy and a dataloader.

    Returns ``(model, ddp_model, dataloader)``; with ``sched=True`` also builds
    an ``AdamW`` optimizer and a ``LambdaLR`` scheduler for each model and
    returns ``(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    """
    set_seed(42)
    model = RegressionModel()
    # Make a copy of `model` before `prepare` wraps/moves it
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        lr_sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(
            ddp_model, ddp_opt, ddp_sched, dataloader
        )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, lr_sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """On a single CPU/GPU the `no_sync` context manager must be a no-op."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """On a distributed setup `no_sync` must actually delay gradient syncing."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """Check `Accelerator.accumulate` only syncs grads on accumulation boundaries."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync on accumulation boundaries
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check `accumulate` keeps optimizer and LR scheduler in lockstep across models."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors what `prepare` does internally
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """Check `GradientState` tracks nested active dataloaders and end-of-epoch."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Entering a nested dataloader must swap the active one ...
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    """Dispatch the tests appropriate for the current distributed configuration."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
72
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy import structure: submodule name -> list of public names it provides.
# (The original assigned the dict to a throwaway name and then referenced the
# undefined `_import_structure`, which raised NameError on import.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is an optional dependency; without it the tokenizer is simply absent.
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    # Give type checkers real symbols instead of the lazy proxy.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
# Tests for `AutoProcessor.from_pretrained` resolution, registration of custom
# processors, and pushing processors to the Hub.
#
# NOTE(review): this file appears machine-obfuscated — every test method is
# named `SCREAMING_SNAKE_CASE__` (later defs shadow earlier ones, so only the
# last method per class is collected), every local binds to the name
# `lowercase`, and many call sites reference the undefined names
# `snake_case__` / `__a`.  The code is preserved byte-for-byte below and only
# documented; restoring the original identifiers would be a separate change.
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available

# Make the custom test modules under tests/utils importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

# Fixture paths.  NOTE(review): all three assignments share one obfuscated
# name, so only the last ('fixtures' dir) survives — presumably these were
# three distinct constants originally; confirm against upstream.
__SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures/vocab.json''')
__SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures''')


class A_ ( unittest.TestCase ):
    """Tests for `AutoProcessor.from_pretrained` resolution and registration."""

    # Minimal WordPiece-style vocab used to build a CustomTokenizer below.
    _A: list = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    def SCREAMING_SNAKE_CASE__ ( self ):
        # setUp-style initializer (obfuscated; the assignment target is lost).
        lowercase = 0

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Resolve a processor directly from a Hub repo id.
        lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Resolve a processor from a local folder containing config + processor files.
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase = WavaVecaConfig()
            lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
            # save in new folder
            model_config.save_pretrained(snake_case__ )
            processor.save_pretrained(snake_case__ )
            lowercase = AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Resolve a processor from a folder holding only the raw fixture files.
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
            copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
            lowercase = AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Resolution must still work when `processor_class` is missing from the
        # tokenizer config (falls back to other hints).
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase = WavaVecaFeatureExtractor()
            lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
            # save in new folder
            processor.save_pretrained(snake_case__ )
            # drop `processor_class` in tokenizer
            with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
                lowercase = json.load(snake_case__ )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
                f.write(json.dumps(snake_case__ ) )
            lowercase = AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same as above, but dropping `processor_class` from the feature
        # extractor config instead.
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase = WavaVecaFeatureExtractor()
            lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
            lowercase = WavaVecaProcessor(snake_case__ , snake_case__ )
            # save in new folder
            processor.save_pretrained(snake_case__ )
            # drop `processor_class` in feature extractor
            with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f:
                lowercase = json.load(snake_case__ )
            config_dict.pop("""processor_class""" )
            with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
                f.write(json.dumps(snake_case__ ) )
            lowercase = AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # `processor_class` stored in the model config plus an empty processor
        # file must be enough to resolve the processor class.
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
            model_config.save_pretrained(snake_case__ )
            # copy relevant files
            copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) )
            # create emtpy sample processor
            with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f:
                f.write("""{}""" )
            lowercase = AutoProcessor.from_pretrained(snake_case__ )
            self.assertIsInstance(snake_case__ , snake_case__ )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Loading a repo with custom (remote) code must require trust_remote_code.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(snake_case__ ):
            lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(snake_case__ ):
            lowercase = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
        lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
        lowercase = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        lowercase = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            lowercase = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
            lowercase = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Registering a custom config/feature-extractor/tokenizer/processor
        # makes them usable through the auto-API; mappings are cleaned up in
        # `finally` so other tests are unaffected.
        try:
            AutoConfig.register("""custom""" , snake_case__ )
            AutoFeatureExtractor.register(snake_case__ , snake_case__ )
            AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
            AutoProcessor.register(snake_case__ , snake_case__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(snake_case__ ):
                AutoProcessor.register(snake_case__ , snake_case__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                lowercase = os.path.join(snake_case__ , """vocab.txt""" )
                with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
                    vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
                lowercase = CustomTokenizer(snake_case__ )
                lowercase = CustomProcessor(snake_case__ , snake_case__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(snake_case__ )
                lowercase = AutoProcessor.from_pretrained(snake_case__ )
                self.assertIsInstance(snake_case__ , snake_case__ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Conflict between a locally registered class and remote code: local
        # wins unless trust_remote_code is enabled.
        # NOTE(review): the three local classes below all share the name `A_`
        # and subclass the undefined `__a` — obfuscation damage; presumably
        # these were New*FeatureExtractor/Tokenizer/Processor subclasses.
        class A_ ( __a ):
            _A = False

        class A_ ( __a ):
            _A = False

        class A_ ( __a ):
            # NOTE(review): the three `_A` assignments overwrite each other;
            # originally distinct attrs (feature_extractor_class,
            # tokenizer_class, special_attribute_present) — confirm upstream.
            _A = '''AutoFeatureExtractor'''
            _A = '''AutoTokenizer'''
            _A = False

        try:
            AutoConfig.register("""custom""" , snake_case__ )
            AutoFeatureExtractor.register(snake_case__ , snake_case__ )
            AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
            AutoProcessor.register(snake_case__ , snake_case__ )
            # If remote code is not set, the default is to use local classes.
            lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            lowercase = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            lowercase = AutoProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ )
            self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def SCREAMING_SNAKE_CASE__ ( self ):
        # A model with only a tokenizer resolves to that tokenizer class.
        lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # A model with only an image processor resolves to that class.
        lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
        self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )


@is_staging_test
class A_ ( unittest.TestCase ):
    """Staging-server tests: pushing processors to the Hub and reloading them."""

    _A: list = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        # setUpClass: log in with the test token.
        # NOTE(review): `lowercase = TOKEN` was presumably `cls._token = TOKEN`
        # originally — `cls._token` is read by the methods below.
        lowercase = TOKEN
        HfFolder.save_token(snake_case__ )

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        # tearDownClass: best-effort cleanup of the repos created during tests.
        try:
            delete_repo(token=cls._token , repo_id="""test-processor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
        except HTTPError:
            pass

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Push a processor to a user namespace repo and reload it.
        lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token )
            lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Push a processor to an organization namespace repo and reload it.
        lowercase = WavaVecaProcessor.from_pretrained(snake_case__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(snake_case__ , """test-processor-org""" ) ,
                push_to_hub=snake_case__ ,
                use_auth_token=self._token ,
                organization="""valid_org""" ,
            )
            lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )

    def SCREAMING_SNAKE_CASE__ ( self ):
        # Push a processor made of dynamic (custom-code) components; the code
        # files and auto_map entries must travel with the repo.
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase = os.path.join(snake_case__ , """vocab.txt""" )
            with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            lowercase = CustomTokenizer(snake_case__ )
            lowercase = CustomProcessor(snake_case__ , snake_case__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
            lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
            processor.save_pretrained(snake_case__ )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map ,
                {
                    """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f:
                lowercase = json.load(snake_case__ )
            self.assertDictEqual(
                tokenizer_config["""auto_map"""] ,
                {
                    """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
                    """AutoProcessor""": """custom_processing.CustomProcessor""",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) )
            repo.push_to_hub()
            lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ )
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
72
0
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class A_ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 BPE tokenizer implemented as a Keras layer.

    Wraps ``keras_nlp``'s ``BytePairTokenizer`` so tokenization can run inside
    a TensorFlow graph, optionally padding outputs to a fixed length.
    """

    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        """
        Args:
            vocab: mapping from token string to id.
            merges: BPE merge rules, one ``"tok_a tok_b"`` string per rule.
            max_length: optional sequence length to pad/truncate to.
            pad_token_id: id used for padding; padding is skipped when None.
        """
        super().__init__()
        # NOTE: the obfuscated original collapsed these to `lowercase = ...`
        # and referenced undefined names; attributes restored to match the
        # keys returned by `get_config` below.
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls , tokenizer: GPTaTokenizer , *args , **kwargs ):
        """Build the layer from an existing slow GPT-2 tokenizer instance."""
        # bpe_ranks keys are (tok_a, tok_b) pairs; serialize as space-joined strings.
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        """Build the layer by first loading the slow tokenizer from a checkpoint."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls , config ):
        """Recreate the layer from a `get_config` dict (Keras deserialization)."""
        return cls(**config )

    def get_config( self ):
        """Return the constructor kwargs for Keras serialization."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call( self , x , max_length: int = None ):
        """Tokenize a batch of strings; returns input_ids and attention_mask.

        ``max_length`` overrides the layer default for this call; padding only
        happens when a pad token id was configured.
        """
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
716
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict loads from README.md and/or legacy dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    """A DatasetInfo written to disk reloads equal, and dataset_info.json exists."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    """The YAML dict contains exactly the whitelisted keys and round-trips through yaml."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    """A DatasetInfosDict written to disk (README.md) reloads equal, modulo YAML-only fields."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
72
0
import re


def UpperCamelCase__(dna: str) -> str:
    """Return the complement of a DNA strand (A<->T, C<->G).

    Raises:
        ValueError: if `dna` contains any character other than A, T, C, G.

    FIX: the scrambled original named the parameter `lowerCAmelCase__` but the
    body read `dna`, which was never defined (NameError). The parameter is now
    consistently named `dna`.
    """
    # Every character must be one of ATCG, otherwise the findall count is shorter.
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    # str.translate does the whole per-character substitution in one C-level pass.
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
717
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Materialize a fine-pruned checkpoint by applying its learned masks.

    Copies non-prunable layers verbatim, multiplies prunable weights by the
    binarized mask for the chosen pruning method, and saves the result next to
    the source model (or at `--target_model_path`).

    FIX: the scrambled original assigned every value to `lowercase` while the
    rest of the body read `pruning_method`, `model_name_or_path`, etc.
    (NameError at runtime); proper bindings are restored.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
72
0
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    """Extracts compressed archives into a hash-addressed cache directory.

    FIX: the scrambled original named every class `A_` (each definition
    shadowing the previous one) while the `Extractor.extractors` dict
    referenced `TarExtractor` etc., and imported non-existent modules
    (`bza`/`pyazr`/`lza` instead of `bz2`/`py7zr`/`lz4`). Real names are
    restored throughout this module.
    """

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Extract when forced, or when no file / non-empty directory exists yet.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Return the extracted path for `input_path` (or `input_path` itself if not an archive)."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    # Leading byte signatures identifying this archive format.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only tar members whose path/link target stays inside `output_path`."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    """Dispatches a path to the extractor matching its magic number / format."""

    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
718
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges from `u` depth-first, returning the vertex path.

    FIX: the scrambled original assigned the edge markers and the recursive
    result to a throwaway `lowercase`, so `visited_edge` was never updated and
    the recursion result was discarded; both bindings are restored.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the undirected edge in both directions
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph: 1 = Euler cycle, 2 = Euler path, 3 = neither.

    Also returns the last odd-degree node found (-1 if none).
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether `graph` has an Euler cycle/path and, if so, one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree nodes.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
72
0
from itertools import product


def total_frequency_distribution(sides_number, dice_number):
    """Return freq[t] = number of ways `dice_number` dice with `sides_number`
    faces sum to t (index 0..sides_number*dice_number).

    FIX: the scrambled original bound every intermediate to `lowercase`, so
    `max_face_number`, `totals_frequencies`, etc. were undefined when read;
    real bindings are restored.
    """
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every outcome of the dice and tally its total.
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution():
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats
    Colin (six 6-sided dice), rounded to 7 decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Colin loses whenever his total is strictly below Peter's.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
719
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds small RoFormer configs/inputs for the common Flax model tests.

    FIX: the scrambled original assigned every attribute to a local
    `lowercase` (so nothing was ever stored on `self`) and gave every method
    the same name; real bindings and names are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_00_00
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
72
0
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    """Build the argparse parser for `accelerate tpu-config`.

    FIX: the scrambled original assigned the parser and argument groups to a
    throwaway `lowercase`, so `config_args`/`pod_args` were undefined when
    used; real bindings are restored.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    """Assemble and run the `gcloud compute tpus tpu-vm ssh` setup command."""
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
720
import argparse import hashlib # hashlib is only used inside the Test class import struct class A_ : def __init__( self : List[str] , snake_case__ : Union[str, Any] ): lowercase = data lowercase = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64) lowercase = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Tuple ): lowercase = list(struct.unpack(""">16L""" , snake_case__ ) ) + [0] * 64 for i in range(16 , 80 ): lowercase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = self.padding() lowercase = self.split_blocks() for block in self.blocks: lowercase = self.expand_block(snake_case__ ) lowercase , lowercase , lowercase , lowercase , lowercase = self.h for i in range(0 , 80 ): if 0 <= i < 20: lowercase = (b & c) | ((~b) & d) lowercase = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: lowercase = b ^ c ^ d lowercase = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: lowercase = (b & c) | (b & d) | (c & d) lowercase = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: lowercase = b ^ c ^ d lowercase = 0Xc_a_6_2_c_1_d_6 lowercase , lowercase , lowercase , lowercase , lowercase = ( self.rotate(snake_case__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(snake_case__ , 30 ), c, d, ) lowercase = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 
0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def UpperCamelCase__ ( ): lowercase = b"""Test String""" assert SHAaHash(lowerCAmelCase__ ).final_hash() == hashlib.shaa(lowerCAmelCase__ ).hexdigest() # noqa: S324 def UpperCamelCase__ ( ): lowercase = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,) parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" ) lowercase = parser.parse_args() lowercase = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file ,"""rb""" ) as f: lowercase = f.read() else: lowercase = bytes(lowerCAmelCase__ ,"""utf-8""" ) print(SHAaHash(lowerCAmelCase__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
72
0
from math import isqrt, loga def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [True] * max_number for i in range(2 ,isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = False return [i for i in range(2 ,lowerCAmelCase__ ) if is_prime[i]] def UpperCamelCase__ ( lowerCAmelCase__ = 800_800 ,lowerCAmelCase__ = 800_800 ): lowercase = degree * loga(lowerCAmelCase__ ) lowercase = int(lowerCAmelCase__ ) lowercase = calculate_prime_numbers(lowerCAmelCase__ ) lowercase = 0 lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
721
class A_ : def __init__( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ): lowercase = name lowercase = val def __str__( self : str ): return F"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : int , snake_case__ : Optional[int] ): return self.val < other.val class A_ : def __init__( self : str , snake_case__ : List[str] ): lowercase = {} lowercase = {} lowercase = self.build_heap(snake_case__ ) def __getitem__( self : Union[str, Any] , snake_case__ : int ): return self.get_value(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Optional[Any] ): return (idx - 1) // 2 def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Dict ): return idx * 2 + 1 def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Optional[Any] ): return idx * 2 + 2 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : Dict ): return self.heap_dict[key] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Any ): lowercase = len(snake_case__ ) - 1 lowercase = self.get_parent_idx(snake_case__ ) for idx, i in enumerate(snake_case__ ): lowercase = idx lowercase = i.val for i in range(snake_case__ , -1 , -1 ): self.sift_down(snake_case__ , snake_case__ ) return array def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : str ): while True: lowercase = self.get_left_child_idx(snake_case__ ) # noqa: E741 lowercase = self.get_right_child_idx(snake_case__ ) lowercase = idx if l < len(snake_case__ ) and array[l] < array[idx]: lowercase = l if r < len(snake_case__ ) and array[r] < array[smallest]: lowercase = r if smallest != idx: lowercase , lowercase = array[smallest], array[idx] ( ( lowercase ) , ( lowercase ) , ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase = smallest else: break def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[int] ): lowercase = self.get_parent_idx(snake_case__ ) while 
p >= 0 and self.heap[p] > self.heap[idx]: lowercase , lowercase = self.heap[idx], self.heap[p] lowercase , lowercase = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase = p lowercase = self.get_parent_idx(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : int ): return self.heap[0] def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase , lowercase = self.heap[-1], self.heap[0] lowercase , lowercase = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): self.heap.append(snake_case__ ) lowercase = len(self.heap ) - 1 lowercase = node.val self.sift_up(len(self.heap ) - 1 ) def SCREAMING_SNAKE_CASE__ ( self : int ): return len(self.heap ) == 0 def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : int , snake_case__ : Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase = new_value lowercase = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE : Any =Node('''R''', -1) __SCREAMING_SNAKE_CASE : Union[str, Any] =Node('''B''', 6) __SCREAMING_SNAKE_CASE : str =Node('''A''', 3) __SCREAMING_SNAKE_CASE : List[Any] =Node('''X''', 1) __SCREAMING_SNAKE_CASE : str =Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE : Any =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
72
0
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( __a , unittest.TestCase ): _A :Dict = OpenAIGPTTokenizer _A :int = OpenAIGPTTokenizerFast _A :int = True _A :Optional[int] = False def SCREAMING_SNAKE_CASE__ ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowercase = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowercase = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""] lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(UpperCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Union[str, Any] ): return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) lowercase = """lower""" lowercase = ["""low""", """er</w>"""] lowercase = tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowercase = tokens + ["""<unk>"""] lowercase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ 
: List[str]=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) # Simple input lowercase = """This is a simple input""" lowercase = ["""This is a simple input 1""", """This is a simple input 2"""] lowercase = ("""This is a simple input""", """This is a pair""") lowercase = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Simple input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Simple input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , ) # Pair input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Pair input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) # Pair input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): pass @require_ftfy @require_spacy @require_tokenizers class A_ ( __a ): pass
700
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig''' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat''' __SCREAMING_SNAKE_CASE : Tuple =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ): if drop_prob == 0.0 or not training: return input lowercase = 1 - drop_prob lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize lowercase = input.div(lowerCAmelCase__ ) * random_tensor return output class A_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ): super().__init__() lowercase = drop_prob def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ): return drop_path(snake_case__ , self.drop_prob , self.training ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return "p={}".format(self.drop_prob ) class A_ ( nn.Module ): def 
__init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ): super().__init__() lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = self.projection(snake_case__ ) lowercase = self.norm(snake_case__ ) return embeddings class A_ ( nn.GroupNorm ): def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ): super().__init__(1 , snake_case__ , **snake_case__ ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any ): super().__init__() lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): return self.pool(snake_case__ ) - hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ): super().__init__() lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase = ACTaFN[config.hidden_act] else: lowercase = config.hidden_act def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ): lowercase = self.conva(snake_case__ ) lowercase = self.act_fn(snake_case__ ) lowercase = self.drop(snake_case__ ) 
lowercase = self.conva(snake_case__ ) lowercase = self.drop(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ): super().__init__() lowercase = PoolFormerPooling(snake_case__ ) lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase = config.use_layer_scale if config.use_layer_scale: lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): if self.use_layer_scale: lowercase = self.pooling(self.before_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = () lowercase = self.output(self.after_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = (output,) + outputs return outputs else: lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase = pooling_output + hidden_states lowercase = () # Second residual connection inside the PoolFormerOutput block lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase = hidden_states + layer_output lowercase = (output,) + outputs return outputs class A_ ( nn.Module ): def 
__init__( self : List[str] , snake_case__ : Optional[Any] ): super().__init__() lowercase = config # stochastic depth decay rule lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase = nn.ModuleList(snake_case__ ) # Transformer blocks lowercase = [] lowercase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) lowercase = nn.ModuleList(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ): lowercase = () if output_hidden_states else None lowercase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase , lowercase = layers # Get patch embeddings from hidden_states lowercase = embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase = blk(snake_case__ ) lowercase = layer_outputs[0] if output_hidden_states: lowercase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class A_ ( __a ): _A :Any = 
PoolFormerConfig _A :int = '''poolformer''' _A :Union[str, Any] = '''pixel_values''' _A :str = True def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Optional[Any] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , ) class A_ ( __a ): def __init__( self : Union[str, Any] , snake_case__ : int ): super().__init__(snake_case__ ) lowercase = config lowercase = PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) lowercase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[int] ): super().__init__() lowercase = nn.Linear(config.hidden_size , config.hidden_size ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.dense(snake_case__ ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __a , ) class A_ ( __a ): def __init__( self : Dict , snake_case__ : Any ): 
super().__init__(snake_case__ ) lowercase = config.num_labels lowercase = PoolFormerModel(snake_case__ ) # Final norm lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = outputs[0] lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase = """single_label_classification""" else: lowercase = """multi_label_classification""" if self.config.problem_type == "regression": lowercase = MSELoss() if self.num_labels == 1: lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase = CrossEntropyLoss() lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase = BCEWithLogitsLoss() lowercase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase = 
(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
72
0
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self : Any , snake_case__ : Any , snake_case__ : Optional[int]=13 , snake_case__ : Optional[Any]=30 , snake_case__ : Tuple=2 , snake_case__ : Any=3 , snake_case__ : Any=True , snake_case__ : List[Any]=True , snake_case__ : str=32 , snake_case__ : str=5 , snake_case__ : Any=4 , snake_case__ : Union[str, Any]=37 , snake_case__ : Optional[int]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : str=0.1 , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[int]=0.02 , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=2 , ): lowercase = parent lowercase = batch_size lowercase = image_size lowercase = patch_size lowercase = num_channels lowercase = is_training lowercase = use_labels lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = type_sequence_label_size lowercase = initializer_range lowercase = scope lowercase = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase = (image_size // patch_size) ** 2 lowercase = 
num_patches + 1 def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : Any , snake_case__ : Tuple ): lowercase = ViTModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : str ): lowercase = ViTForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase = 1 lowercase = ViTForMaskedImageModeling(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def 
SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): lowercase = self.type_sequence_label_size lowercase = ViTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase = 1 lowercase = ViTForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ) = config_and_inputs lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A_ ( snake_case_ , snake_case_ , unittest.TestCase ): _A :Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _A :List[Any] = ( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) _A :int = True _A :int = False _A :str = False _A :Optional[Any] = False def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = ViTModelTester(self ) lowercase = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : str ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): pass def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = 
model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase = model_class(snake_case__ ) lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase = [*signature.parameters.keys()] lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = ViTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def UpperCamelCase__ ( ): lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(snake_case__ ) lowercase = self.default_image_processor lowercase = prepare_img() lowercase 
= image_processor(images=snake_case__ , return_tensors="""pt""" ).to(snake_case__ ) # forward pass with torch.no_grad(): lowercase = model(**snake_case__ ) # verify the logits lowercase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowercase = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. lowercase = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(snake_case__ ) lowercase = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 ) lowercase = prepare_img() lowercase = image_processor(images=snake_case__ , return_tensors="""pt""" ) lowercase = inputs.pixel_values.to(snake_case__ ) # forward pass with torch.no_grad(): lowercase = model(snake_case__ , interpolate_pos_encoding=snake_case__ ) # verify the logits lowercase = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , snake_case__ ) lowercase = torch.tensor( [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" ) lowercase = self.default_image_processor lowercase = prepare_img() lowercase = image_processor(images=snake_case__ , return_tensors="""pt""" ) lowercase = inputs.pixel_values.to(snake_case__ ) # forward pass to 
make sure inference works in fp16 with torch.no_grad(): lowercase = model(snake_case__ )
701
from numpy import exp, pi, sqrt


def UpperCamelCase__(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at ``x``.

    Args:
        x: Point at which the density is evaluated.
        mu: Mean of the distribution (default 0.0).
        sigma: Standard deviation of the distribution (default 1.0).

    Returns:
        The value of the N(mu, sigma^2) PDF at ``x``.
    """
    # PDF of N(mu, sigma^2): 1/sqrt(2*pi*sigma^2) * exp(-(x-mu)^2 / (2*sigma^2)).
    # The original signature declared the same mangled name for all three
    # parameters (a SyntaxError); the body already used x/mu/sigma.
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class A_(unittest.TestCase):
    """Tests for `DisjunctiveConstraint` (constrained beam-search helper)."""

    def test_input_types(self):
        # `token_ids` must be a (non-empty) list of lists of plain ints;
        # tensors (nested or not) are rejected at construction time.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch being a strict prefix of another is ambiguous and rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        # Walk one branch to completion, checking the (stepped, completed,
        # reset) triple returned by update() at every token.
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        # Branches of unequal length, plus an explicit reset() midway through.
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
702
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A weighted directed edge of a 0-1 graph."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Directed graph with 0/1 edge weights, stored as an adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; only weights 0 and 1 are allowed."""
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """Return the 0-1 BFS shortest distance from start to finish.

        Uses a deque: 0-weight edges are pushed to the front and 1-weight
        edges to the back, so vertices are popped in distance order
        without a priority queue (O(V + E)).

        Raises:
            ValueError: if ``finish_vertex`` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if the destination already has an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
from string import ascii_uppercase

# Maps "10".."35" to "A".."Z" for digits above 9.
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative integer to its string representation in ``base``.

    Args:
        num: Non-negative integer to convert.
        base: Target base, 2..36 inclusive.

    Returns:
        ``num`` written in ``base`` (digits above 9 use A-Z).

    Raises:
        TypeError: if ``num`` or ``base`` is not an int.
        ValueError: if ``num`` is negative or ``base`` is out of range.
    """
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")

    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")

    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digits 10..35 render as letters.
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            # Digits were accumulated least-significant first; reverse them.
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
703
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class A_(PretrainedConfig):
    """Configuration for Data2VecAudio models.

    Stores every hyper-parameter of the audio encoder (feature extractor
    convolutions, transformer stack, SpecAugment masking, CTC head,
    adapter and x-vector heads). Defaults reproduce the base architecture.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        # Store conv specs as lists so they serialize cleanly to JSON.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the conv feature extractor.
        return math.prod(self.conv_stride)
72
0
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPTaConfig,
        TaConfig,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeqaSeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPTaLMHeadModel,
        RobertaForMaskedLM,
        TaForConditionalGeneration,
    )


@is_pt_tf_cross_test
class lowercase(unittest.TestCase):
    """Cross-framework tests: load PT weights into TF auto-models and back."""

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        # Tiny checkpoint: exercise the PT<->TF conversion path end-to-end.
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

    def test_from_identifier_from_model_type(self):
        # Identifier whose config declares the model type (roberta here).
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
704
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only tensors that have no HF equivalent (in place)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an untied LM head (nn.Linear) sharing the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and return an HF XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq names the stack "decoder"; HF names it "model".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the output projection was removed above and is rebuilt
    # from the embeddings below; print what was missing/unexpected.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
72
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class A_(PretrainedConfig):
    """Configuration for GPT-NeoX models.

    Defaults reproduce the EleutherAI/gpt-neox-20b architecture.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Fraction of head dims that get rotary position embeddings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict ({'type': ..., 'factor': ...})."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
705
from __future__ import annotations import bisect def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_right(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left <= right: lowercase = left + (right - left) // 2 lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase = midpoint - 1 else: lowercase = midpoint + 1 return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = bisect.bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ) if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if right < left: return None lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ 
,lowerCAmelCase__ ,midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint + 1 ,lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =input('''Enter numbers separated by comma:\n''').strip() __SCREAMING_SNAKE_CASE : Tuple =sorted(int(item) for item in user_input.split(''',''')) __SCREAMING_SNAKE_CASE : Tuple =int(input('''Enter a single number to be found in the list:\n''')) __SCREAMING_SNAKE_CASE : Union[str, Any] =binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
72
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__=False ): lowercase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), 
("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ): for i in range(config.num_hidden_layers ): if base_model: lowercase = "" else: lowercase = "vit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" ) lowercase = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase = in_proj_weight[ : config.hidden_size, : ] lowercase = in_proj_bias[: config.hidden_size] lowercase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase = in_proj_weight[ -config.hidden_size :, : ] lowercase = in_proj_bias[-config.hidden_size :] def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ ) def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", 
"module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ ,lowerCAmelCase_ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = dct.pop(lowerCAmelCase_ ) lowercase = val def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = ViTMSNConfig() lowercase = 1_000 lowercase = "datasets/huggingface/label-files" lowercase = "imagenet-1k-id2label.json" lowercase = json.load(open(hf_hub_download(lowerCAmelCase_ ,lowerCAmelCase_ ) ,"""r""" ) ) lowercase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowercase = 384 lowercase = 1_536 lowercase = 6 elif "l16" in checkpoint_url: lowercase = 1_024 lowercase = 4_096 lowercase = 24 lowercase = 16 lowercase = 0.1 elif "b4" in checkpoint_url: lowercase = 4 elif "l7" in checkpoint_url: lowercase = 7 lowercase = 1_024 lowercase = 4_096 lowercase = 24 lowercase = 16 lowercase = 0.1 lowercase = ViTMSNModel(lowerCAmelCase_ ) lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase_ ,map_location="""cpu""" )["target_encoder"] lowercase = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCAmelCase_ ) lowercase = create_rename_keys(lowerCAmelCase_ ,base_model=lowerCAmelCase_ ) for src, dest in rename_keys: rename_key(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) read_in_q_k_v(lowerCAmelCase_ ,lowerCAmelCase_ ,base_model=lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) model.eval() lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase = Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ) lowercase = ViTImageProcessor( size=config.image_size ,image_mean=lowerCAmelCase_ ,image_std=lowerCAmelCase_ ) lowercase = 
image_processor(images=lowerCAmelCase_ ,return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowercase = model(**lowerCAmelCase_ ) lowercase = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowercase = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: lowercase = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: lowercase = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: lowercase = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: lowercase = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] ,lowerCAmelCase_ ,atol=1E-4 ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __SCREAMING_SNAKE_CASE : List[str] =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
706
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = multiprocessing.Manager() lowercase = manager.list() lowercase = multiprocessing.Process(target=lowerCAmelCase__ ,args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowercase = shutil.rmtree lowercase = os.rmdir lowercase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowercase = {} with swallow_io(): with time_limit(lowerCAmelCase__ ): exec(lowerCAmelCase__ ,lowerCAmelCase__ ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. 
lowercase = rmtree lowercase = rmdir lowercase = chdir @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): def signal_handler(lowerCAmelCase__ ,lowerCAmelCase__ ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL ,lowerCAmelCase__ ) signal.signal(signal.SIGALRM ,lowerCAmelCase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL ,0 ) @contextlib.contextmanager def UpperCamelCase__ ( ): lowercase = WriteOnlyStringIO() with contextlib.redirect_stdout(lowerCAmelCase__ ): with contextlib.redirect_stderr(lowerCAmelCase__ ): with redirect_stdin(lowerCAmelCase__ ): yield @contextlib.contextmanager def UpperCamelCase__ ( ): with tempfile.TemporaryDirectory() as dirname: with chdir(lowerCAmelCase__ ): yield dirname class A_ ( __a ): pass class A_ ( io.StringIO ): def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : int , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case__ : Optional[Any] , **snake_case__ : int ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ): raise OSError def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : Any ): return False class A_ ( contextlib._RedirectStream ): # type: ignore _A :List[Any] = '''stdin''' @contextlib.contextmanager def UpperCamelCase__ ( lowerCAmelCase__ ): if root == ".": yield return lowercase = os.getcwd() os.chdir(lowerCAmelCase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__=None ): if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, 
maximum_memory_bytes) ) faulthandler.disable() import builtins lowercase = None lowercase = None import os lowercase = """1""" lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None import shutil lowercase = None lowercase = None lowercase = None import subprocess lowercase = None # type: ignore lowercase = None import sys lowercase = None lowercase = None lowercase = None lowercase = None lowercase = None
72
0
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def UpperCamelCase__ ( lowerCAmelCase__ ): if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def UpperCamelCase__ ( lowerCAmelCase__ ): for char in word: lowercase = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = set() for token in tokens: lowercase = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowercase = list(_snake_case ) return word_list def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): if not chinese_word_set: return bert_tokens lowercase = max([len(_snake_case ) for w in chinese_word_set] ) lowercase = bert_tokens lowercase , lowercase = 0, len(_snake_case ) while start < end: lowercase = True if is_chinese(bert_word[start] ): lowercase = min(end - start ,_snake_case ) for i in range(_snake_case ,1 ,-1 ): lowercase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): lowercase = """##""" + bert_word[j] lowercase = start + i lowercase = False break if single_word: start += 1 return bert_word def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [] for i in range(0 ,len(_snake_case ) ,100 ): lowercase = ltp_tokenizer.pipeline(lines[i : i + 100] ,tasks=["""cws"""] ).cws lowercase = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowercase = [] for i in range(0 ,len(_snake_case ) ,100 ): lowercase = bert_tokenizer(lines[i 
: i + 100] ,add_special_tokens=_snake_case ,truncation=_snake_case ,max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(_snake_case ) == len(_snake_case ) lowercase = [] for input_ids, chinese_word in zip(_snake_case ,_snake_case ): lowercase = [] for id in input_ids: lowercase = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowercase = add_sub_symbol(_snake_case ,_snake_case ) lowercase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(_snake_case ): if token[:2] == "##": lowercase = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def UpperCamelCase__ ( lowerCAmelCase__ ): with open(args.file_name ,"""r""" ,encoding="""utf-8""" ) as f: lowercase = f.readlines() lowercase = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowercase = LTP(args.ltp ) # faster in GPU device lowercase = BertTokenizer.from_pretrained(args.bert ) lowercase = prepare_ref(_snake_case ,_snake_case ,_snake_case ) with open(args.save_path ,"""w""" ,encoding="""utf-8""" ) as f: lowercase = [json.dumps(_snake_case ) + """\n""" for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert 
tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __SCREAMING_SNAKE_CASE : List[Any] =parser.parse_args() main(args)
707
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A_ ( __a ): _A :Optional[int] = ['''image_processor''', '''tokenizer'''] _A :Tuple = '''BlipImageProcessor''' _A :List[Any] = '''AutoTokenizer''' def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Dict ): lowercase = False super().__init__(snake_case__ , snake_case__ ) lowercase = self.image_processor def __call__( self : List[str] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowercase = self.tokenizer lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , 
) return text_encoding # add pixel_values lowercase = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case__ : int , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : int , **snake_case__ : int ): return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = self.tokenizer.model_input_names lowercase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
72
0
from typing import Any def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): '''simple docstring''' _validation( _A ,_A ,_A ,_A ,_A ,) # Creates data structures and fill initial step lowercase = {} lowercase = {} for state in states_space: lowercase = observations_space[0] lowercase = ( initial_probabilities[state] * emission_probabilities[state][observation] ) lowercase = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 ,len(_A ) ): lowercase = observations_space[o] lowercase = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowercase = """""" lowercase = -1 for k_state in states_space: lowercase = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowercase = probability lowercase = k_state # Update probabilities and pointers dicts lowercase = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowercase = arg_max # The final observation lowercase = observations_space[len(_A ) - 1] # argmax for given final observation lowercase = """""" lowercase = -1 for k_state in states_space: lowercase = probabilities[(k_state, final_observation)] if probability > max_probability: lowercase = probability lowercase = k_state lowercase = arg_max # Process pointers backwards lowercase = last_state lowercase = [] for o in range(len(_A ) - 1 ,-1 ,-1 ): result.append(_A ) lowercase = pointers[previous, observations_space[o]] result.reverse() return result def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): '''simple docstring''' _validate_not_empty( _A ,_A ,_A ,_A ,_A ,) _validate_lists(_A ,_A ) _validate_dicts( _A ,_A ,_A ) 
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): '''simple docstring''' _validate_list(_A ,"""observations_space""" ) _validate_list(_A ,"""states_space""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): '''simple docstring''' if not isinstance(_object ,_A ): lowercase = f"""{var_name} must be a list""" raise ValueError(_A ) else: for x in _object: if not isinstance(_A ,_A ): lowercase = f"""{var_name} must be a list of strings""" raise ValueError(_A ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,): '''simple docstring''' _validate_dict(_A ,"""initial_probabilities""" ,_A ) _validate_nested_dict(_A ,"""transition_probabilities""" ) _validate_nested_dict(_A ,"""emission_probabilities""" ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): '''simple docstring''' _validate_dict(_object ,_A ,_A ) for x in _object.values(): _validate_dict(_A ,_A ,_A ,_A ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = False ): '''simple docstring''' if not isinstance(_object ,_A ): lowercase = f"""{var_name} must be a dict""" raise ValueError(_A ) if not all(isinstance(_A ,_A ) for x in _object ): lowercase = f"""{var_name} all keys must be strings""" raise ValueError(_A ) if not all(isinstance(_A ,_A ) for x in _object.values() ): lowercase = """nested dictionary """ if nested else """""" lowercase = f"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(_A ) if __name__ == "__main__": from doctest import testmod testmod()
708
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', 
'''LevitFeatureExtractor'''), ('''maskformer''', '''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) __SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def UpperCamelCase__ ( lowerCAmelCase__ ): for module_name, extractors in 
FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: lowercase = model_type_to_module_name(lowerCAmelCase__ ) lowercase = importlib.import_module(f""".{module_name}""" ,"""transformers.models""" ) try: return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(lowerCAmelCase__ ,"""__name__""" ,lowerCAmelCase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowercase = importlib.import_module("""transformers""" ) if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ): return getattr(lowerCAmelCase__ ,lowerCAmelCase__ ) return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,**lowerCAmelCase__ ,): lowercase = get_file_from_repo( lowerCAmelCase__ ,lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,use_auth_token=lowerCAmelCase__ ,revision=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(lowerCAmelCase__ ,encoding="""utf-8""" ) as reader: return json.load(lowerCAmelCase__ ) class A_ : def __init__( self : List[Any] ): raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( cls : Dict , snake_case__ : Tuple , **snake_case__ : int ): 
lowercase = kwargs.pop("""config""" , snake_case__ ) lowercase = kwargs.pop("""trust_remote_code""" , snake_case__ ) lowercase = True lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ ) lowercase = config_dict.get("""feature_extractor_type""" , snake_case__ ) lowercase = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(snake_case__ , snake_case__ ): lowercase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ ) # It could be in `config.feature_extractor_type`` lowercase = getattr(snake_case__ , """feature_extractor_type""" , snake_case__ ) if hasattr(snake_case__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: lowercase = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: lowercase = feature_extractor_class_from_name(snake_case__ ) lowercase = feature_extractor_auto_map is not None lowercase = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING lowercase = resolve_trust_remote_code( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if has_remote_code and trust_remote_code: lowercase = get_class_from_dynamic_module( snake_case__ , snake_case__ , **snake_case__ ) lowercase = kwargs.pop("""code_revision""" , snake_case__ ) if os.path.isdir(snake_case__ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING: lowercase = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )] return feature_extractor_class.from_dict(snake_case__ , **snake_case__ ) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case__ : Optional[int] , snake_case__ : List[str] ): FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
72
0
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias for the LayoutLMv2 image processor.

    Kept only for backward compatibility: construction emits a ``FutureWarning``
    and forwards every argument unchanged to the image-processor base class.
    """

    def __init__(self, *args, **kwargs):
        # Fix: the garbled body referenced undefined names and passed no warning
        # category; the deprecation message itself names this class, so the class
        # is restored to its canonical identifier.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
709
"""Convert original fairseq SpeechT5 checkpoints into the Hugging Face format.

Fixes over the garbled original: all module-level mapping tables, the per-task
``MAPPING``/``IGNORE_KEYS`` locals, and the attribute assignments inside
``set_recursively`` were bound to throwaway names (``lowercase``), leaving the
weight-copy loop reading an undefined global and writing nothing into the model.
"""
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq key prefix -> HF key prefix, grouped by sub-module.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task key mappings: speech-to-text, text-to-speech, speech-to-speech.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``hf_pointer`` along dotted ``key`` and copy ``value`` into the
    parameter selected by ``weight_type`` (or into the pointer itself).

    Raises ValueError when the checkpoint tensor shape does not match the model.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True if checkpoint key ``name`` matches any ignore pattern.

    Patterns ending in ``.*`` match prefixes; patterns containing ``.*.`` match
    a prefix and a suffix; anything else matches as a plain substring.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every usable tensor from a fairseq state dict into ``hf_model``.

    ``task`` selects which mapping/ignore tables apply; keys that match nothing
    are collected and reported as unused.
    """
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched suffix.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into the HF model.

    ``type_id`` 0 is the conv itself; ``type_id`` 2 is a norm layer (layer norm
    everywhere, or group norm on layer 0 only).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Build the HF model/processor for ``task``, load the fairseq weights from
    ``checkpoint_path``, save everything to ``pytorch_dump_folder_path`` and
    optionally push to ``repo_id``.
    """
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
72
0
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    """Tiny fixture model whose state-dict keys (linear1/batchnorm/linear2)
    are what the offload tests assert on."""

    def __init__(self):
        super().__init__()
        # Names must match the "linear1.*"/"linear2.*" keys checked below.
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    """Unit tests for accelerate's weight-offloading helpers.

    Fixes over the garbled original: the model class was unreachable (name
    collision on ``A_`` while tests call ``ModelForTest``), ``nn.BatchNormad``
    and ``torch.floataa`` are not real names, and all four test methods shared
    one obfuscated name so unittest discovered none of them.
    """

    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
710
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            # Collect entries that point at nothing on disk; keep every path to
            # verify the file's alphabetical ordering afterwards.
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
72
0
"""Area and surface-area formulas for common geometric shapes.

Each function validates that its dimensions are non-negative and raises
ValueError otherwise.
"""
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube with the given edge length."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a rectangular cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4*pi*r^2."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Surface area of a hemisphere (curved surface plus flat disc): 3*pi*r^2."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Surface area of a right circular cone (base plus lateral surface)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Surface area of a conical frustum with the two base radii and height."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Surface area of a closed right circular cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a ring torus: 4*pi^2*R*r (requires R >= r)."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Area of a trapezium from its parallel sides and height."""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse from its two semi-axes."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with ``sides`` sides of the given length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
711
# Lazy-loading package init for the ResNet model family. Fixes over the garbled
# original: `_import_structure` was referenced but never defined, the backend
# branches rebound a throwaway variable instead of populating the structure
# dict, and the `_LazyModule` instance was never installed into `sys.modules`.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

# Only advertise each backend's modeling module when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
72
0
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return "n choose k": the number of k-element subsets of an n-element set.

    Raises ValueError when k is negative or larger than n.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # n! / (k! * (n-k)!) — integer division is exact here.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
712
"""Release helper: bumps the package version across the repo (pre-release)
or moves it to the next ``.dev0`` version (post-release), and rewrites the
README model list to point at the stable docs."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex to locate the version, replacement template
# in which the literal token VERSION is substituted with the real version).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string inside ``fname``.

    Args:
        fname: Path of the file to edit in place.
        version: New version string to substitute.
        pattern: Key into ``REPLACE_PATTERNS`` selecting the regex/template pair.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update ``check_min_version`` in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Skip folders with non-actively-maintained examples.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup, examples).

    Patch releases leave the examples untouched.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the README model-list links at the stable docs instead of main."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the model list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update every numbered entry until the end prompt.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version out of the package ``__init__``."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and apply it repo-wide."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively pick the next dev version and apply it repo-wide."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
72
0
"""Prim's minimum-spanning-tree algorithm driven by a hand-rolled binary min
heap that tracks each vertex's position so keys can be decreased in place."""
import sys
from collections import defaultdict


class Heap:
    """Array-based min heap over vertex distances.

    ``heap`` holds the distance keys, ``positions`` holds the vertex stored at
    each heap slot, and ``node_position[v]`` is the slot currently holding
    vertex ``v`` (needed for decrease-key).
    """

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap slot currently occupied by ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap slot ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the heap property holds."""
        if start > size // 2 - 1:
            return  # leaf: nothing below to compare against
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertices, then fix the position index for both.
            temp, temp_vertex = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_vertex

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` up from slot ``index`` (decrease-key)."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Shift the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at slot 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Establish the heap property over the whole array."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel so the old minimum sinks away
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the MST edges of a connected weighted graph, rooted at vertex 0.

    Args:
        adjacency_list: Mapping/sequence where ``adjacency_list[v]`` is a list
            of ``[neighbor, weight]`` pairs; vertices are ``0..n-1``.

    Returns:
        List of ``(parent, child)`` tuples in the order vertices joined the tree.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # neighboring tree vertex of each vertex
    # Minimum distance of each unexplored vertex to the partial tree.
    distance_tv = []
    positions = []  # heap slot -> vertex
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# Backward-compatible alias for the previous (generated) public name.
UpperCamelCase__ = prisms_algorithm


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
713
"""Pix2Struct model configuration: a text (decoder) config, a vision
(encoder) config, and a composite config that owns one of each."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration of the Pix2Struct text decoder (T5-style)."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from the composite config
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration of the Pix2Struct vision encoder."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from the composite config
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite Pix2Struct configuration holding a text and a vision config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Token ids are mirrored from the text (decoder) config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep sub-config init ranges in lockstep with the composite config.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs
    ):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
72
0
"""Tests for the GPT-NeoX model family: a config/inputs factory, the common
model-test harness, and a slow integration generation test."""
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class GPTNeoXModelTester:
    """Builds small random configs/inputs and runs shape checks per head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Pad with the last vocab id so padding never collides with real tokens.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source anonymized these four flag names;
    # reconstructed from the upstream test file — confirm against the mixins.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
714
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the gradients of two models are in sync iff a sync step happened.

    Args:
        model_a, model_b: the two models whose parameter grads are compared pairwise.
        did_step (bool): whether a gradient-sync step is expected to have occurred.
        iteration (int): current iteration, included in the failure message.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step.

    When ``do_backward`` is False the loss is manually scaled by the number of
    accumulation steps and ``loss.backward()`` is called directly (the "ground
    truth" path); otherwise ``accelerator.backward`` handles scaling/sync.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Build a reference model, its DDP-prepared copy, and a dataloader.

    With ``sched=True`` also returns matched optimizer/scheduler pairs:
    ``(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
72
0
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode characters.

    Bytes that are already printable map to themselves; the rest are shifted
    into the 256+ range so every byte has a visible, reversible representation
    (this is the GPT-2 byte-level BPE trick that avoids an <unk> token).
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class A_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    Requires a `vocab_file` (token -> id JSON) and a `merges_file` (ranked BPE
    merges). `add_prefix_space=True` makes leading words tokenize like
    mid-sentence words by prepending a space.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the ranked BPE merges to one pre-tokenized piece; cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt to `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BART special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; returns a list of zeros of the full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
715
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __SCREAMING_SNAKE_CASE : Tuple =get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures/vocab.json''') __SCREAMING_SNAKE_CASE : Union[str, Any] =get_tests_dir('''fixtures''') class A_ ( unittest.TestCase ): _A :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = 0 def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig() lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder 
model_config.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : int ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in tokenizer with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in feature extractor with open(os.path.join(snake_case__ , snake_case__ ) , """r""" ) as f: lowercase = json.load(snake_case__ ) config_dict.pop("""processor_class""" ) with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write(json.dumps(snake_case__ ) ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) 
self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : str ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(snake_case__ ) # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(snake_case__ , snake_case__ ) , """w""" ) as f: f.write("""{}""" ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) lowercase = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) lowercase = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) lowercase = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) 
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoProcessor.register(snake_case__ , snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(snake_case__ ) lowercase = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): class A_ ( __a ): _A :List[str] = False class A_ ( __a ): _A :Dict = False class A_ ( __a ): _A :Union[str, Any] = '''AutoFeatureExtractor''' _A :Tuple = '''AutoTokenizer''' _A 
:Optional[Any] = False try: AutoConfig.register("""custom""" , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local classes. lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class A_ ( unittest.TestCase ): _A :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] ): lowercase = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ): try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) 
with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor""" ) , push_to_hub=snake_case__ , use_auth_token=self._token ) lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , """test-processor-org""" ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization="""valid_org""" , ) lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(snake_case__ , """vocab.txt""" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(snake_case__ ) lowercase = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token ) lowercase = Repository(snake_case__ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token ) 
processor.save_pretrained(snake_case__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) ) as f: lowercase = json.load(snake_case__ ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , """custom_processing.py""" ) ) ) repo.push_to_hub() lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
72
0
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's `step_pred` function.

    Attributes:
        prev_sample: Computed sample x_{t-1} of the previous timestep.
        prev_sample_mean: Mean-averaged `prev_sample` (no noise term added).
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) stochastic differential equation scheduler.

    Implements predictor-corrector sampling (Song et al., "Score-Based Generative
    Modeling through Stochastic Differential Equations").

    Args:
        num_train_timesteps: Number of diffusion steps used to train the model.
        snr: Signal-to-noise ratio weighting the corrector step size.
        sigma_min: Smallest noise scale of the sigma schedule.
        sigma_max: Largest noise scale of the sigma schedule (also the initial noise std).
        sampling_eps: Smallest (final) continuous timestep.
        correct_steps: Number of corrector steps performed per predictor step.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op; present so schedulers are interchangeable. Returns `sample` unchanged."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Set the continuous timesteps (descending from 1 to `sampling_eps`) used for the diffusion chain."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise-scale schedule; also sets timesteps if not set yet."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # sigma of the previous discrete index; zero at the start of the chain
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample one step backwards via the reverse-time SDE.

        Raises:
            ValueError: if `set_timesteps` was never run.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: nudge the predicted sample along the score after each predictor step.

        Raises:
            ValueError: if `set_timesteps` was never run.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Scale `noise` by the sigma at each timestep and add it to `original_samples`."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
716
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads the size both from README.md YAML and dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """A DatasetInfo written to a directory round-trips through from_directory."""
    tmp_dir = str(tmp_path)
    dataset_info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    """The YAML dict contains exactly the whitelisted fields and survives a yaml dump/load round-trip."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    """A default DatasetInfo produces an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict written to a directory round-trips through from_directory."""
    tmp_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_dir)
    reloaded = DatasetInfosDict.from_directory(tmp_dir)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_dir, "README.md"))
72
0
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# NOTE: raw string so the regex escapes are unambiguous.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the "must mention a checkpoint" rule.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    """Verify every registered config class documents at least one valid checkpoint link.

    A checkpoint mention is valid when the markdown link text matches the hub URL,
    e.g. `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.

    Raises:
        ValueError: listing all config classes without a valid checkpoint mention.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
717
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Materialize a fine-pruned checkpoint: apply the learned masks and save a dense state dict.

    Args:
        args: Namespace with `pruning_method`, `threshold`, `model_name_or_path`
            and `target_model_path` (see the argparse definitions below).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # embeddings / layer norms / pooler are never pruned
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # sigmoid=True per the movement-pruning reference implementation
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # hard-concrete stretch interval from the L0 regularization paper
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
72
0
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    """Check at runtime that the installed version of `pkg` satisfies the pin in `deps`.

    Args:
        pkg: package name, must be a key of `deps`.
        hint: optional extra text appended to the error message on failure.
    """
    require_version(deps[pkg], hint)
718
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first walk from `u` consuming each undirected edge once.

    Args:
        u: start node.
        graph: adjacency dict mapping node -> list of neighbours.
        visited_edge: (max_node+1) x (max_node+1) boolean matrix of used edges (mutated).
        path: walk accumulated so far (internal; callers omit it).

    Returns:
        The Eulerian walk as a list of nodes.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the edge in both directions before recursing
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree node count.

    Returns:
        (check, odd_node): check is 1 for an Euler cycle (no odd-degree node),
        2 for an Euler path (exactly two), 3 for neither; odd_node is the last
        odd-degree node seen, or -1 if there is none.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Print whether `graph` has an Euler cycle/path and, if so, one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # an Euler path must start at one of the odd-degree nodes
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Run the Euler check on a handful of example graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],  # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
72
0
from math import factorial


def binomial_distribution(successes, trials, prob):
    """Return the binomial probability mass at `successes`.

    P(X = k) = C(n, k) * p**k * (1 - p)**(n - k) for k successes in n
    independent trials with per-trial success probability p.

    Args:
        successes: number of successes k (non-negative int, <= trials).
        trials: number of trials n (non-negative int).
        prob: per-trial success probability, strictly between 0 and 1.

    Raises:
        ValueError: on out-of-range or non-integer inputs.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
719
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class A_ ( unittest.TestCase ): def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : List[str]=13 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : List[Any]=True , snake_case__ : List[Any]=True , snake_case__ : Optional[int]=99 , snake_case__ : Any=32 , snake_case__ : Any=5 , snake_case__ : int=4 , snake_case__ : Optional[Any]=37 , snake_case__ : Dict="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=5_12 , snake_case__ : Optional[Any]=16 , snake_case__ : List[Any]=2 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : List[str]=4 , ): lowercase = parent lowercase = batch_size lowercase = seq_length lowercase = is_training lowercase = use_attention_mask lowercase = use_token_type_ids lowercase = use_labels lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = type_sequence_label_size lowercase = initializer_range lowercase = num_choices def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase = None if self.use_attention_mask: lowercase = random_attention_mask([self.batch_size, 
self.seq_length] ) lowercase = None if self.use_token_type_ids: lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase = config_and_inputs lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A_ ( __a , unittest.TestCase ): _A :List[Any] = True _A :Union[str, Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ ( self : int ): lowercase = FlaxRoFormerModelTester(self ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowercase = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=snake_case__ ) lowercase = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case__ ) @require_flax class A_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowercase = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowercase = model(snake_case__ )[0] lowercase 
= 5_00_00 lowercase = (1, 6, vocab_size) self.assertEqual(output.shape , snake_case__ ) lowercase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
72
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    """Configuration class for a MobileNetV1 model.

    Stores the constructor arguments verbatim as attributes; extra keyword
    arguments are forwarded to `PretrainedConfig`.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic for image models.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
720
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHAaHash:
    """SHA-1 message digest (FIPS 180-1) over a bytes payload."""

    def __init__(self, data):
        # data: the raw bytes to be hashed
        self.data = data
        # Initial digest state h0..h4 as defined by the SHA-1 specification.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """32-bit left rotation of `n` by `b` bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message with 0x80, zeros, and the 64-bit big-endian bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block; return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                # Round function f and constant k vary by 20-round stage.
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    """Cross-check this implementation against hashlib."""
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """CLI entry point: hash a --string or the contents of a --file."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
72
0
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values __SCREAMING_SNAKE_CASE : Dict =argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] =parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') __SCREAMING_SNAKE_CASE : Union[str, Any] =rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: __SCREAMING_SNAKE_CASE : Optional[int] =rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) __SCREAMING_SNAKE_CASE : List[Any] =args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install 
from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
721
class Node:
    """A named value; ordering is by `val` so nodes can live in a heap."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap of `Node`s with O(1) value lookup by name
    (`heap["X"]`), plus decrease-key support via a node->index map."""

    def __init__(self, array):
        # idx_of_element maps each Node object to its current heap index.
        self.idx_of_element = {}
        # heap_dict maps node name -> node value for __getitem__ lookups.
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify `array` in place (bottom-up) and register all nodes."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Restore the heap property downward from `idx`."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Restore the heap property upward from `idx`."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class A_ ( __UpperCAmelCase ): _A :List[str] = "Wav2Vec2FeatureExtractor" _A :List[Any] = "AutoTokenizer" def __init__( self : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): super().__init__(_lowerCamelCase , _lowerCamelCase ) lowercase = self.feature_extractor lowercase = False @classmethod def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , snake_case__ : Tuple , **snake_case__ : List[str] ): try: return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase ) except OSError: warnings.warn( F"""Loading a tokenizer inside {cls.__name__} from a config that does not""" """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ , _lowerCamelCase , ) lowercase = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) lowercase = WavaVecaCTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) def __call__( self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : str ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.""" ) lowercase = kwargs.pop("""raw_speech""" ) else: lowercase = kwargs.pop("""audio""" , _lowerCamelCase ) lowercase = kwargs.pop("""sampling_rate""" , _lowerCamelCase ) lowercase = kwargs.pop("""text""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: lowercase = args[0] lowercase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: lowercase = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if text is not None: lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: lowercase = encodings["""input_ids"""] return inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : str , **snake_case__ : List[Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase ) lowercase = kwargs.pop("""input_features""" , _lowerCamelCase ) lowercase = kwargs.pop("""labels""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: lowercase = args[0] lowercase = args[1:] if input_features is not None: lowercase = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) if labels is not None: lowercase = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: lowercase = labels["""input_ids"""] return input_features def SCREAMING_SNAKE_CASE__ ( self : str , *snake_case__ : Dict , **snake_case__ : Dict ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @contextmanager def SCREAMING_SNAKE_CASE__ ( self : 
Tuple ): warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) lowercase = True lowercase = self.tokenizer yield lowercase = self.feature_extractor lowercase = False
700
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig __SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__) # General docstring __SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig''' # Base docstring __SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7] # Image classification docstring __SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12''' __SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat''' __SCREAMING_SNAKE_CASE : Tuple =[ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = False ): if drop_prob == 0.0 or not training: return input lowercase = 1 - drop_prob lowercase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowercase = keep_prob + torch.rand(lowerCAmelCase__ ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize lowercase = input.div(lowerCAmelCase__ ) * random_tensor return output class A_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = None ): super().__init__() lowercase = drop_prob def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : torch.Tensor ): return drop_path(snake_case__ , self.drop_prob , self.training ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): return "p={}".format(self.drop_prob ) class A_ ( nn.Module ): def 
__init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ): super().__init__() lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size) lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride) lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding) lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ ) lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity() def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ): lowercase = self.projection(snake_case__ ) lowercase = self.norm(snake_case__ ) return embeddings class A_ ( nn.GroupNorm ): def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ): super().__init__(1 , snake_case__ , **snake_case__ ) class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any ): super().__init__() lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ): return self.pool(snake_case__ ) - hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ): super().__init__() lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = nn.Convad(snake_case__ , snake_case__ , 1 ) lowercase = PoolFormerDropPath(snake_case__ ) if isinstance(config.hidden_act , snake_case__ ): lowercase = ACTaFN[config.hidden_act] else: lowercase = config.hidden_act def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ): lowercase = self.conva(snake_case__ ) lowercase = self.act_fn(snake_case__ ) lowercase = self.drop(snake_case__ ) 
lowercase = self.conva(snake_case__ ) lowercase = self.drop(snake_case__ ) return hidden_states class A_ ( nn.Module ): def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ): super().__init__() lowercase = PoolFormerPooling(snake_case__ ) lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) lowercase = PoolFormerGroupNorm(snake_case__ ) # Useful for training neural nets lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity() lowercase = config.use_layer_scale if config.use_layer_scale: lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) lowercase = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ): if self.use_layer_scale: lowercase = self.pooling(self.before_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = () lowercase = self.output(self.after_norm(snake_case__ ) ) lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowercase = hidden_states + self.drop_path(snake_case__ ) lowercase = (output,) + outputs return outputs else: lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) ) # First residual connection lowercase = pooling_output + hidden_states lowercase = () # Second residual connection inside the PoolFormerOutput block lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) ) lowercase = hidden_states + layer_output lowercase = (output,) + outputs return outputs class A_ ( nn.Module ): def 
__init__( self : List[str] , snake_case__ : Optional[Any] ): super().__init__() lowercase = config # stochastic depth decay rule lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowercase = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowercase = nn.ModuleList(snake_case__ ) # Transformer blocks lowercase = [] lowercase = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowercase = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case__ ) ) lowercase = nn.ModuleList(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ): lowercase = () if output_hidden_states else None lowercase = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowercase , lowercase = layers # Get patch embeddings from hidden_states lowercase = embedding_layer(snake_case__ ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case__ ): lowercase = blk(snake_case__ ) lowercase = layer_outputs[0] if output_hidden_states: lowercase = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ ) class A_ ( __a ): _A :Any = 
PoolFormerConfig _A :int = '''poolformer''' _A :Union[str, Any] = '''pixel_values''' _A :str = True def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ): if isinstance(snake_case__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ): if isinstance(snake_case__ , snake_case__ ): lowercase = value __SCREAMING_SNAKE_CASE : Optional[Any] =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __SCREAMING_SNAKE_CASE : str =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , ) class A_ ( __a ): def __init__( self : Union[str, Any] , snake_case__ : int ): super().__init__(snake_case__ ) lowercase = config lowercase = PoolFormerEncoder(snake_case__ ) # Initialize weights and apply final processing self.post_init() def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) lowercase = self.encoder( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , ) class A_ ( nn.Module ): def __init__( self : List[str] , snake_case__ : Optional[int] ): super().__init__() lowercase = nn.Linear(config.hidden_size , config.hidden_size ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ): lowercase = self.dense(snake_case__ ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __a , ) class A_ ( __a ): def __init__( self : Dict , snake_case__ : Any ): 
super().__init__(snake_case__ ) lowercase = config.num_labels lowercase = PoolFormerModel(snake_case__ ) # Final norm lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowercase = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ): lowercase = return_dict if return_dict is not None else self.config.use_return_dict lowercase = self.poolformer( snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , ) lowercase = outputs[0] lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) ) lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowercase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowercase = """single_label_classification""" else: lowercase = """multi_label_classification""" if self.config.problem_type == "regression": lowercase = MSELoss() if self.num_labels == 1: lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowercase = loss_fct(snake_case__ , snake_case__ ) elif self.config.problem_type == "single_label_classification": lowercase = CrossEntropyLoss() lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowercase = BCEWithLogitsLoss() lowercase = loss_fct(snake_case__ , snake_case__ ) if not return_dict: lowercase = 
(logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
72
0
class A_:
    """Disjoint-set (union-find) structure that tracks set sizes.

    Each element starts in its own set with an initial count taken from
    ``set_counts``; ``max_set`` always holds the size of the largest set
    seen so far.  Union is by rank; ``get_parent`` compresses paths.
    """

    def __init__(self, set_counts):
        """``set_counts``: list with the initial count of each singleton set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        # Every element is initially its own parent (its own set root).
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing ``src`` and ``dst``.

        Returns False when both are already in the same set, True otherwise.
        The absorbed root's count is zeroed and folded into the survivor.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # dst's tree is at least as tall: attach src under dst.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Return the root of ``disj_set``, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
701
from numpy import exp, pi, sqrt


def UpperCamelCase__(x, mu=0.0, sigma=1.0):
    """Return the Gaussian (normal) probability density at ``x``.

    Parameters:
        x: point (or numpy array of points) at which to evaluate the pdf.
        mu: mean of the distribution (default 0.0).
        sigma: standard deviation of the distribution (default 1.0).

    Returns:
        The value of the normal pdf N(mu, sigma**2) at ``x``.
    """
    # Standard normal density: 1/sqrt(2*pi*sigma^2) * exp(-(x-mu)^2 / (2*sigma^2))
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
def UpperCamelCase__(a, b):
    """Return the bitwise AND of two non-negative integers as a binary string.

    Both operands are converted to binary, zero-padded to a common width,
    and combined digit by digit; the result is prefixed with ``"0b"``.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A_ : _A :int _A :int class A_ : def __init__( self : List[str] , snake_case__ : int ): lowercase = [[] for _ in range(snake_case__ )] lowercase = size def __getitem__( self : Optional[int] , snake_case__ : int ): return iter(self._graph[vertex] ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): return self._size def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(snake_case__ , snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : int , snake_case__ : int ): lowercase = deque([start_vertex] ) lowercase = [None] * self.size lowercase = 0 while queue: lowercase = queue.popleft() lowercase = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowercase = current_distance + edge.weight lowercase = distances[edge.destination_vertex] if ( isinstance(snake_case__ , snake_case__ ) and new_distance >= dest_vertex_distance ): continue lowercase = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
72
0
def get_demo_graph(index):
    """Return one of four fixed demo graphs (adjacency-dict form) by index."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph):
    """Return the list of bridges of an undirected graph.

    ``graph`` maps each vertex to its neighbour list.  A bridge is an edge
    whose removal disconnects the graph; bridges are found with one DFS per
    component using Tarjan-style low-link values.  Each bridge is reported
    once as a tuple ``(u, v)`` with ``u < v``.
    """
    id_ = 0
    n = len(graph)  # number of vertices in the graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass  # do not traverse the tree edge back to the parent
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                # No back edge from the subtree of `to` reaches above `at`:
                # the tree edge (at, to) is a bridge.
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
703
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class for Data2VecAudio models.

    Stores the hyper-parameters of the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, the CTC head, the optional
    adapter, and the TDNN/x-vector head.  Defaults reproduce the
    ``facebook/data2vec-audio-base-960h`` architecture.
    """

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv-layer specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall downsampling factor of the convolutional feature extractor."""
        return math.prod(self.conv_stride)
72
0
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


# Each test class below checks one UNet 2D block type against a fixed slice of
# expected output values via UNetBlockTesterMixin.test_output.


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no timestep embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 1_28,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
        super().test_output(expected_slice)
704
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys (in place) that have no HF counterpart."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear output projection tied to the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and return an HF ``XGLMForCausalLM``.

    Reads the fairseq ``cfg`` to build an equivalent ``XGLMConfig``, renames
    the ``decoder.*`` keys to ``model.*``, loads the weights (non-strict, so
    missing/unexpected keys are reported), and ties the LM head to the
    input embeddings.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
72
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    """Tool that answers a natural-language question about a document image
    using a Donut (VisionEncoderDecoder) model fine-tuned on DocVQA."""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build the decoder prompt ids and pixel values for one (image, question) pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run greedy generation and return the raw output token sequences."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Decode generated ids, strip special tokens, and extract the answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
705
from __future__ import annotations import bisect def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): if hi < 0: lowercase = len(lowerCAmelCase__ ) while lo < hi: lowercase = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase = mid + 1 else: lowercase = mid return lo def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0 ,lowerCAmelCase__ = -1 ): sorted_collection.insert(bisect_right(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = 0 lowercase = len(lowerCAmelCase__ ) - 1 while left <= right: lowercase = left + (right - left) // 2 lowercase = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase = midpoint - 1 else: lowercase = midpoint + 1 return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = bisect.bisect_left(lowerCAmelCase__ ,lowerCAmelCase__ ) if index != len(lowerCAmelCase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): if right < left: return None lowercase = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ 
,lowerCAmelCase__ ,midpoint - 1 ) else: return binary_search_by_recursion(lowerCAmelCase__ ,lowerCAmelCase__ ,midpoint + 1 ,lowerCAmelCase__ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =input('''Enter numbers separated by comma:\n''').strip() __SCREAMING_SNAKE_CASE : Tuple =sorted(int(item) for item in user_input.split(''',''')) __SCREAMING_SNAKE_CASE : Tuple =int(input('''Enter a single number to be found in the list:\n''')) __SCREAMING_SNAKE_CASE : Union[str, Any] =binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
72
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class A_(PreTrainedTokenizer):
    """XLM-RoBERTa tokenizer backed by a SentencePiece BPE model.

    Token ids are aligned with the original fairseq vocabulary: fairseq
    reserves ids 0-3 for '<s>', '<pad>', '</s>', '<unk>' while SentencePiece
    puts '<unk>' at 0, so every SentencePiece id is shifted by
    ``fairseq_offset`` (1) and '<mask>' is appended at the end.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in fairseq and 3 in spm.
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> (single) or <s> A </s></s> B </s> (pair) specials."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids; return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + 1  # +1 for the <mask> token

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # spm returns 0 for unknown pieces; map that to the unk id.
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-tokens and turn the SentencePiece markers back into spaces."""
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                fi.write(self.sp_model.serialized_model_proto())

        return (out_vocab_file,)
706
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run *check_program* in a sandboxed subprocess and report the outcome.

    Returns a dict with the task/completion ids, the raw result string and a
    boolean ``passed``. If the subprocess produced no result within
    ``timeout + 1`` seconds it is killed and reported as timed out.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    """exec() *check_program* under the reliability guard; append the outcome
    ("passed" / "timed out" / "failed: ...") to *result*.

    WARNING: must run in a dedicated subprocess — reliability_guard() cripples
    the interpreter it runs in.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up: restore the functions the guard disabled.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    """Raise TimeoutException inside the block after *seconds* (SIGALRM based)."""

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and block stdin for the duration of the block."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    """Create a temporary directory and chdir into it for the block."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    """Raised by time_limit() when the wall-clock budget is exhausted."""


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to *root* ("." is a no-op)."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive interpreter/OS facilities before running untrusted code.

    Optionally caps memory via resource limits. WARNING: this is a
    best-effort mitigation, NOT a security sandbox — untrusted model-generated
    code should additionally be isolated (container/VM).
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
72
0
def UpperCamelCase__(lowerCAmelCase__):
    """Return the mean absolute deviation of a sequence of numbers.

    The mean absolute deviation is the average distance of the values from
    their arithmetic mean.

    Raises:
        ValueError: if the sequence is empty.
    """
    # Bug fix: the body previously referenced an undefined name `nums`
    # instead of the actual parameter, so every call raised NameError.
    if not lowerCAmelCase__:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    count = len(lowerCAmelCase__)
    average = sum(lowerCAmelCase__) / count  # Calculate the average
    return sum(abs(x - average) for x in lowerCAmelCase__) / count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A_(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single BLIP-2
    processor that handles images, text, or both."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # BLIP-2 does not use token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        # Default processor when only images are provided.
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize *text* and/or preprocess *images*.

        Returns the tokenizer output when only text is given, otherwise the
        image-processor output (with the text encoding merged in when both
        are given). Raises ValueError when neither is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # dict.fromkeys keeps order while removing duplicates.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
72
0
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that the grads of the two models are in sync iff *did_step*."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """One forward/backward step; manual backward scales the loss by the
    accumulation steps, otherwise accelerator.backward handles it."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Build a reference model plus a prepared ("DDP") copy and a dataloader.

    When *sched* is True, also builds optimizers and LR schedulers for both
    and returns the full 7-tuple.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched_lr = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched_lr, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """`no_sync` must be a no-op when not distributed: grads always in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """Grads should only sync on the iterations outside `no_sync`."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accumulate` should only sync grads every other step (or at the end)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP" (manual backward)
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Same as above but also checks optimizer/scheduler stay aligned."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # The reference scheduler must tick once per process to match.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()

        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """gradient_state must track whichever prepared dataloader is active."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration swaps the active dataloader, then restores it.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # Entry point for TPU spawned processes.
    main()


if __name__ == "__main__":
    main()
708
# Auto feature-extractor registry: maps a model type to its feature extractor
# class and exposes an AutoFeatureExtractor-style factory class (`A_`).
# NOTE(review): this file has been machine-renamed — every assignment target
# became `lowercase`/`__SCREAMING_SNAKE_CASE`, so many later references
# (`FEATURE_EXTRACTOR_MAPPING_NAMES`, `class_name`, `config_dict`, ...) are
# undefined names. Comments below describe the evident intent; the broken
# bindings are flagged rather than silently rewritten.
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

# Module logger (bound to an obfuscated name; intended: `logger`).
__SCREAMING_SNAKE_CASE : List[str] =logging.get_logger(__name__)

# Ordered model-type -> feature-extractor-class-name table.
# NOTE(review): intended name is presumably FEATURE_EXTRACTOR_MAPPING_NAMES,
# which is what the code below reads — TODO confirm.
__SCREAMING_SNAKE_CASE : Any =OrderedDict(
    [
        ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
        ('''beit''', '''BeitFeatureExtractor'''),
        ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
        ('''clap''', '''ClapFeatureExtractor'''),
        ('''clip''', '''CLIPFeatureExtractor'''),
        ('''clipseg''', '''ViTFeatureExtractor'''),
        ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
        ('''convnext''', '''ConvNextFeatureExtractor'''),
        ('''cvt''', '''ConvNextFeatureExtractor'''),
        ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
        ('''data2vec-vision''', '''BeitFeatureExtractor'''),
        ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
        ('''deit''', '''DeiTFeatureExtractor'''),
        ('''detr''', '''DetrFeatureExtractor'''),
        ('''dinat''', '''ViTFeatureExtractor'''),
        ('''donut-swin''', '''DonutFeatureExtractor'''),
        ('''dpt''', '''DPTFeatureExtractor'''),
        ('''encodec''', '''EncodecFeatureExtractor'''),
        ('''flava''', '''FlavaFeatureExtractor'''),
        ('''glpn''', '''GLPNFeatureExtractor'''),
        ('''groupvit''', '''CLIPFeatureExtractor'''),
        ('''hubert''', '''Wav2Vec2FeatureExtractor'''),
        ('''imagegpt''', '''ImageGPTFeatureExtractor'''),
        ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
        ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
        ('''levit''', '''LevitFeatureExtractor'''),
        ('''maskformer''', '''MaskFormerFeatureExtractor'''),
        ('''mctct''', '''MCTCTFeatureExtractor'''),
        ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
        ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
        ('''mobilevit''', '''MobileViTFeatureExtractor'''),
        ('''nat''', '''ViTFeatureExtractor'''),
        ('''owlvit''', '''OwlViTFeatureExtractor'''),
        ('''perceiver''', '''PerceiverFeatureExtractor'''),
        ('''poolformer''', '''PoolFormerFeatureExtractor'''),
        ('''regnet''', '''ConvNextFeatureExtractor'''),
        ('''resnet''', '''ConvNextFeatureExtractor'''),
        ('''segformer''', '''SegformerFeatureExtractor'''),
        ('''sew''', '''Wav2Vec2FeatureExtractor'''),
        ('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
        ('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
        ('''speecht5''', '''SpeechT5FeatureExtractor'''),
        ('''swiftformer''', '''ViTFeatureExtractor'''),
        ('''swin''', '''ViTFeatureExtractor'''),
        ('''swinv2''', '''ViTFeatureExtractor'''),
        ('''table-transformer''', '''DetrFeatureExtractor'''),
        ('''timesformer''', '''VideoMAEFeatureExtractor'''),
        ('''tvlt''', '''TvltFeatureExtractor'''),
        ('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
        ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
        ('''van''', '''ConvNextFeatureExtractor'''),
        ('''videomae''', '''VideoMAEFeatureExtractor'''),
        ('''vilt''', '''ViltFeatureExtractor'''),
        ('''vit''', '''ViTFeatureExtractor'''),
        ('''vit_mae''', '''ViTFeatureExtractor'''),
        ('''vit_msn''', '''ViTFeatureExtractor'''),
        ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
        ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
        ('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
        ('''whisper''', '''WhisperFeatureExtractor'''),
        ('''xclip''', '''CLIPFeatureExtractor'''),
        ('''yolos''', '''YolosFeatureExtractor'''),
    ]
)

# Lazy config-class -> feature-extractor-class mapping.
# NOTE(review): FEATURE_EXTRACTOR_MAPPING_NAMES is not defined above under
# that name (see module note) — this line raises NameError as written.
__SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def UpperCamelCase__ ( lowerCAmelCase__ ):
    # Resolve a feature-extractor *class name* (string) to the actual class.
    # NOTE(review): `class_name` is undefined here; the parameter
    # `lowerCAmelCase__` is evidently the intended class name — TODO confirm.
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            lowercase = model_type_to_module_name(lowerCAmelCase__ )
            lowercase = importlib.import_module(f""".{module_name}""" ,"""transformers.models""" )
            try:
                return getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
            except AttributeError:
                # Class not exported by this module; keep scanning.
                continue
    # Also look through extractors registered at runtime via `register`.
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(lowerCAmelCase__ ,"""__name__""" ,lowerCAmelCase__ ) == class_name:
            return extractor
    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    lowercase = importlib.import_module("""transformers""" )
    if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ):
        return getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
    return None


# NOTE(review): every parameter below shares the name `lowerCAmelCase__`,
# which is a SyntaxError ("duplicate argument in function definition");
# the original signature clearly had distinct names (pretrained path,
# cache_dir, force_download, ...) — TODO restore.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,lowerCAmelCase__ = False ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = False ,**lowerCAmelCase__ ,):
    # Fetch the feature-extractor config JSON from a local dir or the Hub.
    lowercase = get_file_from_repo(
        lowerCAmelCase__ ,lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,use_auth_token=lowerCAmelCase__ ,revision=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
    if resolved_config_file is None:
        logger.info(
            """Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
        return {}
    with open(lowerCAmelCase__ ,encoding="""utf-8""" ) as reader:
        return json.load(lowerCAmelCase__ )


class A_ :
    # Factory class: not instantiable, use the classmethod below.
    def __init__( self : List[Any] ):
        raise EnvironmentError(
            """AutoFeatureExtractor is designed to be instantiated """
            """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )

    # `from_pretrained`-style entry point: resolve the extractor class from the
    # feature-extractor config, the model config, remote code, or the mapping.
    # NOTE(review): the decorator argument `snake_case__` is undefined at class
    # scope (intended: the mapping-names dict) — TODO confirm.
    @classmethod
    @replace_list_option_in_docstrings(snake_case__ )
    def SCREAMING_SNAKE_CASE__ ( cls : Dict , snake_case__ : Tuple , **snake_case__ : int ):
        lowercase = kwargs.pop("""config""" , snake_case__ )
        lowercase = kwargs.pop("""trust_remote_code""" , snake_case__ )
        lowercase = True
        # Load the raw feature-extractor config dict for the checkpoint.
        lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ )
        lowercase = config_dict.get("""feature_extractor_type""" , snake_case__ )
        lowercase = None
        if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
            lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(snake_case__ , snake_case__ ):
                lowercase = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
            # It could be in `config.feature_extractor_type``
            lowercase = getattr(snake_case__ , """feature_extractor_type""" , snake_case__ )
            if hasattr(snake_case__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
                lowercase = config.auto_map["""AutoFeatureExtractor"""]
        if feature_extractor_class is not None:
            lowercase = feature_extractor_class_from_name(snake_case__ )
        lowercase = feature_extractor_auto_map is not None
        lowercase = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING
        lowercase = resolve_trust_remote_code(
            snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        if has_remote_code and trust_remote_code:
            # Dynamically import the extractor class shipped with the checkpoint.
            lowercase = get_class_from_dynamic_module(
                snake_case__ , snake_case__ , **snake_case__ )
            lowercase = kwargs.pop("""code_revision""" , snake_case__ )
            if os.path.isdir(snake_case__ ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING:
            lowercase = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )]
            return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )

    # Register a new (config class, feature extractor class) pair at runtime.
    # NOTE(review): this method shares its name with the classmethod above, so
    # it silently replaces it in the class namespace — TODO confirm intended
    # names (`from_pretrained` / `register`).
    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( snake_case__ : Optional[int] , snake_case__ : List[str] ):
        FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
72
0
from math import factorial def UpperCamelCase__ ( lowerCAmelCase__ = 100 ): return sum(map(lowerCAmelCase__ ,str(factorial(lowerCAmelCase__ ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
709
# Conversion script: port a fairseq SpeechT5 checkpoint into the Hugging Face
# SpeechT5 classes for one of three tasks (s2t, t2s, s2s).
# NOTE(review): this file has been machine-renamed — all assignment targets
# became `lowercase`/`__SCREAMING_SNAKE_CASE` and all parameters became
# `lowerCAmelCase__`, so the MAPPING_*/IGNORE_KEYS_* names read below are
# undefined, and several `def` lines repeat a parameter name (a SyntaxError).
# Comments describe the evident intent; the breakage is flagged, not rewritten.
import argparse

import torch

from transformers import (
    SpeechTaConfig,
    SpeechTaFeatureExtractor,
    SpeechTaForSpeechToSpeech,
    SpeechTaForSpeechToText,
    SpeechTaForTextToSpeech,
    SpeechTaProcessor,
    SpeechTaTokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
# Module logger (intended name: `logger`, which is what the code below uses).
__SCREAMING_SNAKE_CASE : Any =logging.get_logger('''transformers.models.speecht5''')

# fairseq-key -> HF-key rename tables, grouped per sub-module.
# NOTE(review): intended names are presumably MAPPING_SPEECH_ENCODER_PRENET,
# MAPPING_TEXT_ENCODER_PRENET, etc., matching the `**MAPPING_...` merges below.
__SCREAMING_SNAKE_CASE : Optional[Any] ={
    '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
    '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
    '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
    '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
    '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
    '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__SCREAMING_SNAKE_CASE : Optional[int] ={
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
    '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
    '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
    '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
    '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
    '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
    '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
    '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
    '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
    '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
    '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
    '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
    '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] ={
    '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
# Transformer encoder layer renames (shared by all three tasks).
__SCREAMING_SNAKE_CASE : Optional[int] ={
    '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
    '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
    '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
    '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
    '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
    '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
    '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
    '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
    '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
# Transformer decoder layer renames (shared by all three tasks).
__SCREAMING_SNAKE_CASE : List[Any] ={
    '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
    '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
    '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
    '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
    '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
    '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
    '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
    '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
    '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
    '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
    '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
    '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
    '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
# Per-task merged mapping tables (s2t, t2s, s2s).
# NOTE(review): the MAPPING_* names merged here are never bound under those
# names in this file (see module note) — NameError as written.
__SCREAMING_SNAKE_CASE : List[Any] ={
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE : List[str] ={
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE : Optional[int] ={
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
# Keys copied without the "speecht5." prefix (empty here), and key patterns to
# skip entirely while porting weights.
__SCREAMING_SNAKE_CASE : Dict =[]
__SCREAMING_SNAKE_CASE : List[str] =[
    '''encoder.version''',
    '''encoder.layers.*.norm_k.weight''',
    '''encoder.layers.*.norm_k.bias''',
    '''decoder.version''',
    '''decoder.layers.*.norm_k.weight''',
    '''decoder.layers.*.norm_k.bias''',
    '''decoder.pos_emb.pe_k''',
    '''speech_encoder_prenet.embed_positions._float_tensor''',
    '''text_decoder_prenet.embed_positions._float_tensor''',
]
__SCREAMING_SNAKE_CASE : List[str] =IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''speech_decoder_prenet.*''',
    '''speech_decoder_postnet.*''',
]
__SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [
    '''encoder.proj''',
    '''speech_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
__SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]


# Walk a dotted attribute path on the HF model, shape-check, and copy a tensor
# into the addressed parameter slot.
# NOTE(review): all five parameters share one name — SyntaxError; the body
# reads `key`, `weight_type`, `hf_pointer`, `value`, `full_name`, which are the
# evident intended parameter names — TODO restore.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    for attribute in key.split(""".""" ):
        lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
    if weight_type is not None:
        lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ ).shape
    else:
        lowercase = hf_pointer.shape
    # Refuse to copy if the checkpoint tensor does not match the target slot.
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        lowercase = value
    elif weight_type == "weight_g":
        lowercase = value
    elif weight_type == "weight_v":
        lowercase = value
    elif weight_type == "bias":
        lowercase = value
    elif weight_type == "running_mean":
        lowercase = value
    elif weight_type == "running_var":
        lowercase = value
    elif weight_type == "num_batches_tracked":
        lowercase = value
    else:
        lowercase = value
    logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )


# Return True if `name` matches any ignore pattern (exact substring, trailing
# ".*" prefix match, or "prefix.*.suffix" two-part match).
# NOTE(review): duplicate parameter names again; body reads `ignore_keys` and
# `name` — TODO restore.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            lowercase , lowercase = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


# Iterate every tensor in the fairseq state dict and route it into the HF
# model via the per-task mapping table; collect names that matched nothing.
# NOTE(review): duplicate parameter names; body reads `task`, `fairseq_dict`,
# `hf_model` — TODO restore.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    lowercase = []
    if task == "s2t":
        lowercase = hf_model.speechta.encoder.prenet.feature_encoder
        lowercase = MAPPING_S2T
        lowercase = IGNORE_KEYS_S2T
    elif task == "t2s":
        lowercase = None
        lowercase = MAPPING_T2S
        lowercase = IGNORE_KEYS_T2S
    elif task == "s2s":
        lowercase = hf_model.speechta.encoder.prenet.feature_encoder
        lowercase = MAPPING_S2S
        lowercase = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"""Unsupported task: {task}""" )
    for name, value in fairseq_dict.items():
        if should_ignore(lowerCAmelCase__ ,lowerCAmelCase__ ):
            logger.info(f"""{name} was ignored""" )
            continue
        lowercase = False
        if "conv_layers" in name:
            # Convolutional feature-encoder weights take a dedicated loader.
            load_conv_layer(
                lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,hf_model.config.feat_extract_norm == """group""" ,)
            lowercase = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    lowercase , lowercase = key.split(""".*.""" )
                    if prefix in name and suffix in name:
                        lowercase = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    lowercase = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name and
                        # substitute it for the wildcard in the HF name.
                        lowercase = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2]
                        lowercase = mapped_key.replace("""*""" ,lowerCAmelCase__ )
                    if "weight_g" in name:
                        lowercase = """weight_g"""
                    elif "weight_v" in name:
                        lowercase = """weight_v"""
                    elif "bias" in name:
                        lowercase = """bias"""
                    elif "weight" in name:
                        lowercase = """weight"""
                    elif "running_mean" in name:
                        lowercase = """running_mean"""
                    elif "running_var" in name:
                        lowercase = """running_var"""
                    elif "num_batches_tracked" in name:
                        lowercase = """num_batches_tracked"""
                    else:
                        lowercase = None
                    set_recursively(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase__ )
    logger.warning(f"""Unused weights: {unused_weights}""" )


# Copy one convolutional feature-encoder tensor (conv weight/bias or layer
# norm weight/bias) into the HF feature extractor, shape-checked.
# NOTE(review): duplicate parameter names; body reads `full_name`, `value`,
# `feature_extractor`, `unused_weights`, `use_group_norm` — TODO restore.
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
    lowercase = full_name.split("""conv_layers.""" )[-1]
    lowercase = name.split(""".""" )
    lowercase = int(items[0] )
    lowercase = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowerCAmelCase__ )


# Top-level conversion entry point: build config/model/tokenizer/processor for
# the requested task, load the fairseq checkpoint, port the weights, save, and
# optionally push to the Hub. Runs under no_grad since only tensors are copied.
# NOTE(review): duplicate parameter names; the body reads `config_path`,
# `task`, `vocab_path`, `repo_id`, etc. — TODO restore.
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,):
    if config_path is not None:
        lowercase = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
    else:
        lowercase = SpeechTaConfig()
    if task == "s2t":
        lowercase = config.max_text_positions
        lowercase = SpeechTaForSpeechToText(lowerCAmelCase__ )
    elif task == "t2s":
        lowercase = 1_876
        lowercase = 600
        lowercase = config.max_speech_positions
        lowercase = SpeechTaForTextToSpeech(lowerCAmelCase__ )
    elif task == "s2s":
        lowercase = 1_876
        lowercase = config.max_speech_positions
        lowercase = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
    else:
        raise ValueError(f"""Unknown task name: {task}""" )
    if vocab_path:
        lowercase = SpeechTaTokenizer(lowerCAmelCase__ ,model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        lowercase = AddedToken("""<mask>""" ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ )
        lowercase = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
    lowercase = SpeechTaFeatureExtractor()
    lowercase = SpeechTaProcessor(tokenizer=lowerCAmelCase__ ,feature_extractor=lowerCAmelCase__ )
    processor.save_pretrained(lowerCAmelCase__ )
    lowercase = torch.load(lowerCAmelCase__ )
    recursively_load_weights(fairseq_checkpoint["""model"""] ,lowerCAmelCase__ ,lowerCAmelCase__ )
    model.save_pretrained(lowerCAmelCase__ )
    if repo_id:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(lowerCAmelCase__ )
        model.push_to_hub(lowerCAmelCase__ )


if __name__ == "__main__":
    # CLI wrapper around the conversion function.
    # NOTE(review): the parser is bound to an obfuscated name yet read back as
    # `parser`/`args`, and `convert_speechta_checkpoint` is not defined under
    # that name above — NameError as written.
    __SCREAMING_SNAKE_CASE : Optional[Any] =argparse.ArgumentParser()
    parser.add_argument(
        '''--task''',
        default='''s2t''',
        type=str,
        help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    __SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
72
0
import unittest from transformers import DonutProcessor __SCREAMING_SNAKE_CASE : str ='''naver-clova-ix/donut-base''' class A_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self : int ): lowercase = DonutProcessor.from_pretrained(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowercase = { """name""": """John Doe""", """age""": """99""", """city""": """Atlanta""", """state""": """GA""", """zip""": """30301""", """phone""": """123-4567""", """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}], } lowercase = ( """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>""" """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>""" """<s_nicknames><s_nickname>Johnny</s_nickname>""" """<sep/><s_nickname>JD</s_nickname></s_nicknames>""" ) lowercase = self.processor.tokenajson(UpperCamelCase__ ) self.assertDictEqual(UpperCamelCase__ , UpperCamelCase__ )
710
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __SCREAMING_SNAKE_CASE : List[Any] ='''.''' if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] =os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') __SCREAMING_SNAKE_CASE : Dict =[] __SCREAMING_SNAKE_CASE : Dict =[] with open(doctest_file_path) as fp: for line in fp: __SCREAMING_SNAKE_CASE : Optional[Any] =line.strip() __SCREAMING_SNAKE_CASE : Tuple =os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __SCREAMING_SNAKE_CASE : Optional[Any] ='''\n'''.join(non_existent_paths) raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
72
0
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels __SCREAMING_SNAKE_CASE : Any =object() # For specifying empty leaf dict `{}` __SCREAMING_SNAKE_CASE : Any =object() def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = tuple((re.compile(x + """$""" ) for x in qs) ) for i in range(len(__lowerCAmelCase ) - len(__lowerCAmelCase ) + 1 ): lowercase = [x.match(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase ,ks[i:] )] if matches and all(__lowerCAmelCase ): return True return False def UpperCamelCase__ ( lowerCAmelCase__ ): def replace(lowerCAmelCase__ ,lowerCAmelCase__ ): for rule, replacement in rules: if _match(__lowerCAmelCase ,__lowerCAmelCase ): return replacement return val return replace def UpperCamelCase__ ( ): return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" ,__lowerCAmelCase )), (("transformer", "wte", "embedding"), P("""mp""" ,__lowerCAmelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase ,"""mp""" )), (("attention", "out_proj", "kernel"), P("""mp""" ,__lowerCAmelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__lowerCAmelCase ,"""mp""" )), (("mlp", "c_fc", "bias"), P("""mp""" )), (("mlp", "c_proj", "kernel"), P("""mp""" ,__lowerCAmelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = _get_partition_rules() lowercase = _replacement_rules(__lowerCAmelCase ) lowercase = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )} lowercase = {k: replace(__lowerCAmelCase ,__lowerCAmelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__lowerCAmelCase ) )
711
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# BUG FIX: the import-structure dict and its per-backend extensions had been
# renamed to throwaway `__SCREAMING_SNAKE_CASE` names, so the backend lists
# were never attached to the dict and the `_import_structure` passed to
# `_LazyModule` below was an undefined name (NameError at import time).
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_resnet'''] = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; runtime stays lazy.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    __SCREAMING_SNAKE_CASE : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
72
0
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class A_ ( pl.LightningModule ):
    """Minimal Lightning wrapper: a Longformer plus a 2-class QA output head."""

    def __init__( self : str , snake_case__ : int ):
        super().__init__()
        # BUG FIX: the original bound the model, label count, and head to a
        # throwaway local `lowercase` (and read the undefined name `model`),
        # so `self.model` / `self.num_labels` / `self.qa_outputs` — all used
        # below and by the conversion function — never existed.
        self.model = snake_case__
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        # Forward pass intentionally unimplemented: this wrapper exists only to
        # load checkpoint weights for conversion.
        pass


def UpperCamelCase__ ( lowerCAmelCase__ ,lightning_ckpt_path ,pytorch_dump_folder_path ):
    """Convert a Lightning Longformer-QA checkpoint to a HF model directory.

    BUG FIX: the original signature repeated `lowerCAmelCase__` three times
    (a SyntaxError) and the body referenced undefined names (`a_`, plus
    results assigned to `lowercase` but read back as `lightning_model` /
    `longformer_for_qa`). The first parameter keeps its original name so
    keyword callers of `lowerCAmelCase__=` still work.

    :param lowerCAmelCase__: model identifier of the base Longformer.
    :param lightning_ckpt_path: path to the PyTorch Lightning checkpoint.
    :param pytorch_dump_folder_path: output directory for the HF model.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(lowerCAmelCase__ )
    lightning_model = A_(longformer )
    ckpt = torch.load(lightning_ckpt_path ,map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(lowerCAmelCase__ )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE : int =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __SCREAMING_SNAKE_CASE : Any =parser.parse_args()
    # BUG FIX: the original called the undefined name
    # `convert_longformer_qa_checkpoint_to_pytorch`; the function defined in
    # this file is `UpperCamelCase__`.
    UpperCamelCase__(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
712
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
# Maps a pattern name to (regex matching the version line, replacement template
# where the literal "VERSION" is substituted with the target version).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version line is rewritten on every release, keyed by pattern name.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` to `version`, using the regex named `pattern`.

    Args:
        fname: path of the file to update.
        version: new version string to write.
        pattern: key into ``REPLACE_PATTERNS`` selecting which line to rewrite.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned minimum version in all example scripts to `version`."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere: init/setup files, and the examples unless this is a patch release."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` documentation with links to the stable docs
    in the README's model list (done when cutting a release)."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            # Write the rewritten line back in place (not to a throwaway local).
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read and parse the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps (bump version, clean README links)."""
    # First let's get the default version: base version if we are in dev,
    # bump micro for a patch, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps (move back to a dev version)."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
72
0