Dataset schema (column name, dtype, observed range):

    column                    dtype     values
    code                      string    lengths 87 to 55.2k
    code_codestyle            int64     0 to 349
    style_context             string    lengths 135 to 49.1k
    style_context_codestyle   int64     0 to 349
    label                     int64     0 to 1
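Each row pairs a `code` sample with a `style_context` sample, each tagged with an integer style id, plus a binary `label`. A minimal sketch of iterating over such rows, assuming they are exported as JSON Lines with the field names above (the file name and the label semantics here are assumptions, not part of this dump):

```python
import json
from collections import Counter

# Hypothetical export of the rows listed below.
ROWS_PATH = "codestyle_rows.jsonl"

label_counts = Counter()
with open(ROWS_PATH, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # 'code' and 'style_context' hold flattened Python source strings;
        # the '*_codestyle' fields are integer style ids in [0, 349];
        # 'label' is 0 or 1 (assumed: whether the pair shares a code style).
        label_counts[row["label"]] += 1

print(dict(label_counts))
```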
code:
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __snake_case = 2 class lowercase : """simple docstring""" def __init__( self , *, # begin keyword-only arguments UpperCamelCase_="<s>" , UpperCamelCase_="<pad>" , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_=None , ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = bos, unk, pad, eos UpperCamelCase__ :Dict = [] UpperCamelCase__ :Any = [] UpperCamelCase__ :Dict = {} UpperCamelCase__ :Dict = self.add_symbol(A_ ) UpperCamelCase__ :Optional[Any] = self.add_symbol(A_ ) UpperCamelCase__ :Tuple = self.add_symbol(A_ ) UpperCamelCase__ :str = self.add_symbol(A_ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(A_ ) UpperCamelCase__ :Optional[int] = len(self.symbols ) def __eq__( self , UpperCamelCase_ ): '''simple docstring''' return self.indices == other.indices def __getitem__( self , UpperCamelCase_ ): '''simple docstring''' if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): '''simple docstring''' return len(self.symbols ) def __contains__( self , UpperCamelCase_ ): '''simple docstring''' return sym in self.indices @classmethod def lowerCAmelCase__ ( cls , UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = cls() d.add_from_file(A_ ) return d def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=1 , UpperCamelCase_=False ): '''simple docstring''' if word in self.indices and not overwrite: UpperCamelCase__ :Union[str, Any] = self.indices[word] UpperCamelCase__ :List[Any] = self.count[idx] + n return idx else: UpperCamelCase__ :Union[str, Any] = len(self.symbols ) UpperCamelCase__ :Union[str, Any] = idx self.symbols.append(A_ ) self.count.append(A_ ) return idx def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' return 0 def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' if isinstance(A_ , A_ ): try: with open(A_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(A_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(A_ ) ) return UpperCamelCase__ :int = f.readlines() UpperCamelCase__ :Tuple = self._load_meta(A_ ) for line in lines[indices_start_line:]: try: UpperCamelCase__ , UpperCamelCase__ :Optional[int] = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": UpperCamelCase__ :Optional[int] = True UpperCamelCase__ , UpperCamelCase__ :List[str] = line.rsplit(''' ''' , 1 ) else: UpperCamelCase__ :Dict = False UpperCamelCase__ :Dict = int(A_ ) UpperCamelCase__ :Dict = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(A_ ) ) self.add_symbol(A_ , n=A_ , overwrite=A_ ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def a ( __a ) -> Dict: '''simple docstring''' UpperCamelCase__ :Optional[int] = dict((re.sub(R'''@@$''' , '''''' , _lowercase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , _lowercase ), v) for k, v in d.items() ) UpperCamelCase__ :str = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f'''{k}</w>'''] UpperCamelCase__ :List[str] = d[k] # restore return da def a ( __a , __a ) -> Any: '''simple docstring''' if not os.path.exists(_lowercase ): raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(_lowercase , exist_ok=_lowercase ) print(f'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models UpperCamelCase__ :int = os.path.join(_lowercase , '''checkpoint.pt''' ) if not os.path.isfile(_lowercase ): raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' ) UpperCamelCase__ :Union[str, Any] = torch.load(_lowercase , map_location='''cpu''' ) UpperCamelCase__ :Dict = chkpt['''cfg''']['''model'''] # dicts UpperCamelCase__ :Dict = os.path.join(_lowercase , '''dict.txt''' ) if not os.path.isfile(_lowercase ): raise ValueError(f'''path to the file {dict_file} does not exist!''' ) UpperCamelCase__ :Union[str, Any] = Dictionary.load(_lowercase ) UpperCamelCase__ :List[str] = rewrite_dict_keys(src_dict.indices ) UpperCamelCase__ :Optional[int] = len(_lowercase ) UpperCamelCase__ :str = os.path.join(_lowercase , VOCAB_FILES_NAMES['''vocab_file'''] ) print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) ) # merges_file (bpecodes) UpperCamelCase__ :Any = os.path.join(_lowercase , '''bpecodes''' ) if not os.path.isfile(_lowercase ): raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' ) UpperCamelCase__ :Any = os.path.join(_lowercase , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(_lowercase , _lowercase ) # model config UpperCamelCase__ :Union[str, Any] = os.path.join(_lowercase , '''config.json''' ) UpperCamelCase__ :Optional[int] = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.0_2, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1e-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(f'''Generating {biogpt_model_config_file}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) ) # tokenizer config UpperCamelCase__ :Any = os.path.join(_lowercase , _lowercase ) UpperCamelCase__ :Union[str, Any] = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(f'''Generating {biogpt_tokenizer_config_file}''' ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_lowercase , ensure_ascii=_lowercase , indent=_lowercase ) ) # model UpperCamelCase__ :List[str] = chkpt['''model'''] # remove unneeded keys UpperCamelCase__ :Any = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(_lowercase , _lowercase ) UpperCamelCase__ :str = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): UpperCamelCase__ :Optional[int] = model_state_dict.pop(_lowercase ) else: UpperCamelCase__ :List[str] = model_state_dict.pop(_lowercase ) UpperCamelCase__ :List[Any] = BioGptConfig.from_pretrained(_lowercase ) UpperCamelCase__ :Optional[Any] = BioGptForCausalLM(_lowercase ) # check that it loads ok model_new.load_state_dict(_lowercase ) # save UpperCamelCase__ :str = os.path.join(_lowercase , _lowercase ) print(f'''Generating {pytorch_weights_dump_path}''' ) torch.save(_lowercase , _lowercase ) print('''Conversion is done!''' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __snake_case = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 97

style_context:
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowerCamelCase (_a ): _lowercase = ["""image_processor""", """tokenizer"""] _lowercase = """OwlViTImageProcessor""" _lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self: int,A_: Tuple=None,A_: int=None,**A_: int ): '''simple docstring''' __UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.',A_,) __UpperCamelCase = kwargs.pop('feature_extractor' ) __UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(A_,A_ ) def __call__( self: str,A_: Dict=None,A_: Optional[int]=None,A_: Any=None,A_: Tuple="max_length",A_: int="np",**A_: Optional[Any] ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' ) if text is not None: if isinstance(A_,A_ ) or (isinstance(A_,A_ ) and not isinstance(text[0],A_ )): __UpperCamelCase = [self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ )] elif isinstance(A_,A_ ) and isinstance(text[0],A_ ): __UpperCamelCase = [] # Maximum number of queries across batch __UpperCamelCase = max([len(A_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(A_ ) != max_num_queries: __UpperCamelCase = t + [' '] * (max_num_queries - len(A_ )) __UpperCamelCase = self.tokenizer(A_,padding=A_,return_tensors=A_,**A_ ) encodings.append(A_ ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": __UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 ) __UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings],axis=0 ) __UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings],axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings],dim=0 ) __UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings],dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings],axis=0 ) __UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings],axis=0 ) else: raise ValueError('Target return tensor type could not be returned' ) __UpperCamelCase = BatchEncoding() __UpperCamelCase = input_ids __UpperCamelCase = attention_mask if query_images is not None: __UpperCamelCase = BatchEncoding() __UpperCamelCase = self.image_processor( A_,return_tensors=A_,**A_ ).pixel_values __UpperCamelCase = query_pixel_values if images is not None: __UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ ) if text is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**A_ ),tensor_type=A_ ) def snake_case_ ( self: Optional[int],*A_: int,**A_: List[Any] ): '''simple docstring''' return self.image_processor.post_process(*A_,**A_ ) def snake_case_ ( self: str,*A_: Optional[int],**A_: List[Any] ): '''simple docstring''' return self.image_processor.post_process_object_detection(*A_,**A_ ) def snake_case_ ( self: str,*A_: Tuple,**A_: int ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*A_,**A_ ) def snake_case_ ( self: List[str],*A_: str,**A_: List[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*A_,**A_ ) def snake_case_ ( self: int,*A_: Any,**A_: Tuple ): '''simple docstring''' return self.tokenizer.decode(*A_,**A_ ) @property def snake_case_ ( self: Optional[Any] ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,) return self.image_processor_class @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,) return self.image_processor
style_context_codestyle: 310
label: 0

code:
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class A_ ( unittest.TestCase , _a ): def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = load_tool("text-to-speech" ) self.tool.setup() def lowercase ( self : Union[str, Any] ): torch.manual_seed(0 ) _UpperCAmelCase = self.tool("hey" ) _UpperCAmelCase = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) ) def lowercase ( self : Dict ): torch.manual_seed(0 ) _UpperCAmelCase = self.tool("hey" ) _UpperCAmelCase = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
code_codestyle: 22

style_context:
import math def _A ( _lowercase ) -> int: """simple docstring""" if not isinstance(_lowercase , _lowercase ): __UpperCamelCase = f'''Input value of [number={number}] must be an integer''' raise TypeError(_lowercase ) if number < 1: __UpperCamelCase = f'''Input value of [number={number}] must be > 0''' raise ValueError(_lowercase ) elif number == 1: return 3 elif number == 2: return 5 else: __UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2 __UpperCamelCase = [3, 5] __UpperCamelCase = 2 __UpperCamelCase = 3 for block in range(1 , _lowercase ): for _ in range(_lowercase ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(1_1): __snake_case = 0 try: __snake_case = proth(number) except ValueError: print(f"""ValueError: there is no {number}th Proth number""") continue print(f"""The {number}th Proth number: {value}""")
style_context_codestyle: 310
label: 0

code:
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: Optional[int] =SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCamelCase__: Dict =4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCamelCase__: List[str] =4 lowerCamelCase__: str =48 lowerCamelCase__: Dict ="pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCamelCase__: Optional[int] =[6, 6, 6, 6] lowerCamelCase__: str =60 lowerCamelCase__: List[Any] =[6, 6, 6, 6] lowerCamelCase__: Any ="pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCamelCase__: Dict =4 lowerCamelCase__: Any ="nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCamelCase__: Tuple =1 lowerCamelCase__: Optional[Any] =1 lowerCamelCase__: List[Any] =126 lowerCamelCase__: List[str] =7 lowerCamelCase__: Optional[Any] =255.0 lowerCamelCase__: Dict ="" return config def lowerCAmelCase_ ( __a , __a ) -> Dict: """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: lowerCamelCase__: Optional[int] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: lowerCamelCase__: Tuple =name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" ) if "layers" in name: lowerCamelCase__: int =name.replace("layers" , "encoder.stages" ) if "residual_group.blocks" in name: lowerCamelCase__: str =name.replace("residual_group.blocks" , "layers" ) if "attn.proj" in name: lowerCamelCase__: str =name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowerCamelCase__: Union[str, Any] =name.replace("attn" , "attention.self" ) if "norm1" in name: lowerCamelCase__: Union[str, Any] =name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCamelCase__: Optional[int] =name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowerCamelCase__: List[Any] =name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCamelCase__: List[str] =name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: lowerCamelCase__: List[Any] =name.replace("q_bias" , "query.bias" ) if "k_bias" in name: lowerCamelCase__: int =name.replace("k_bias" , "key.bias" ) if "v_bias" in name: lowerCamelCase__: str =name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: lowerCamelCase__: str =name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if "patch_embed.proj" in name: lowerCamelCase__: Optional[Any] =name.replace("patch_embed.proj" , "patch_embed.projection" ) if name == "norm.weight": lowerCamelCase__: Union[str, Any] ="layernorm.weight" if name == "norm.bias": lowerCamelCase__: Optional[Any] ="layernorm.bias" if "conv_first" in name: lowerCamelCase__: Optional[Any] =name.replace("conv_first" , "first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCamelCase__: Any =name.replace("conv_last" , "final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCamelCase__: str =name.replace("conv_before_upsample.0" , "conv_before_upsample" ) if "upsample.0" in name: lowerCamelCase__: Tuple =name.replace("upsample.0" , "upsample.convolution_0" ) if "upsample.2" in name: lowerCamelCase__: Optional[int] =name.replace("upsample.2" , "upsample.convolution_1" ) lowerCamelCase__: str ="upsample." + name elif config.upsampler == "pixelshuffledirect": lowerCamelCase__: Optional[Any] =name.replace("upsample.0.weight" , "upsample.conv.weight" ) lowerCamelCase__: Union[str, Any] =name.replace("upsample.0.bias" , "upsample.conv.bias" ) else: pass else: lowerCamelCase__: str ="swin2sr." + name return name def lowerCAmelCase_ ( __a , __a ) -> Any: """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__: str =orig_state_dict.pop(_lowercase ) if "qkv" in key: lowerCamelCase__: List[Any] =key.split("." ) lowerCamelCase__: Optional[Any] =int(key_split[1] ) lowerCamelCase__: Any =int(key_split[4] ) lowerCamelCase__: Union[str, Any] =config.embed_dim if "weight" in key: lowerCamelCase__: Union[str, Any] =val[:dim, :] lowerCamelCase__: int =val[dim : dim * 2, :] lowerCamelCase__: List[str] =val[-dim:, :] else: lowerCamelCase__: Tuple =val[:dim] lowerCamelCase__: Tuple =val[dim : dim * 2] lowerCamelCase__: int =val[-dim:] pass else: lowerCamelCase__: str =val return orig_state_dict def lowerCAmelCase_ ( __a , __a , __a ) -> List[Any]: """simple docstring""" lowerCamelCase__: Dict =get_config(_lowercase ) lowerCamelCase__: Any =SwinaSRForImageSuperResolution(_lowercase ) model.eval() lowerCamelCase__: List[str] =torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" ) lowerCamelCase__: Optional[int] =convert_state_dict(_lowercase , _lowercase ) lowerCamelCase__ , lowerCamelCase__: List[str] =model.load_state_dict(_lowercase , strict=_lowercase ) if len(_lowercase ) > 0: raise ValueError("Missing keys when converting: {}".format(_lowercase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values lowerCamelCase__: List[Any] ="https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" lowerCamelCase__: str =Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("RGB" ) lowerCamelCase__: List[Any] =SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCamelCase__: str =126 if "Jpeg" in checkpoint_url else 256 lowerCamelCase__: str =Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCamelCase__: List[Any] =transforms(_lowercase ).unsqueeze(0 ) if config.num_channels == 1: lowerCamelCase__: Dict =pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCamelCase__: Union[str, Any] =model(_lowercase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCamelCase__: Dict =torch.Size([1, 3, 512, 512] ) lowerCamelCase__: Optional[Any] =torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCamelCase__: List[Any] =torch.Size([1, 3, 1024, 1024] ) lowerCamelCase__: List[Any] =torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCamelCase__: Any =torch.Size([1, 3, 1024, 1024] ) lowerCamelCase__: Optional[Any] =torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCamelCase__: List[Any] =torch.Size([1, 3, 512, 512] ) lowerCamelCase__: int =torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCamelCase__: List[str] =torch.Size([1, 3, 1024, 1024] ) lowerCamelCase__: Any =torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowercase , atol=1e-3 ) print("Looks ok!" ) lowerCamelCase__: Dict ={ "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } lowerCamelCase__: str =url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_lowercase ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") __A = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 10

style_context:
import torch from transformers import AutoModel class __lowerCamelCase (torch.nn.Module ): def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(A_,self ).__init__() __UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ ) __UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 ) __UpperCamelCase = torch.nn.Softmax(dim=1 ) def snake_case_ ( self: Tuple,**A_: Union[str, Any] ): '''simple docstring''' return self.bert(**A_ ).last_hidden_state def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ): '''simple docstring''' return token_embeddings.sum(2,keepdim=A_ ) def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ): '''simple docstring''' return self.softmax(T * self.cos(A_,A_ ) ) def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = W_supports['sizes'].tolist() __UpperCamelCase = W_supports['start_token_id'].item() __UpperCamelCase = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __UpperCamelCase = self.BERT(**A_ ) __UpperCamelCase = self.BERT(**A_ ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = W_supports['input_ids'] == start_token_id __UpperCamelCase = W_supports['input_ids'] == end_token_id for i, size in enumerate(A_ ): if i == 0: __UpperCamelCase = 0 else: __UpperCamelCase = support_sizes[i - 1] __UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]] __UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]] __UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 ) __UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __UpperCamelCase = torch.vstack((p_starts, p_start) ) __UpperCamelCase = torch.vstack((p_ends, p_end) ) else: __UpperCamelCase = p_start __UpperCamelCase = p_end return p_starts, p_ends
style_context_codestyle: 310
label: 0

code:
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any , __lowercase : Union[str, Any] , __lowercase : Union[str, Any]=13 , __lowercase : List[Any]=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Dict=True , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=99 , __lowercase : str=32 , __lowercase : List[str]=5 , __lowercase : Optional[int]=4 , __lowercase : List[Any]=37 , __lowercase : List[str]="gelu" , __lowercase : Any=0.1 , __lowercase : List[Any]=0.1 , __lowercase : Dict=5_12 , __lowercase : Optional[int]=16 , __lowercase : List[str]=2 , __lowercase : Optional[Any]=0.02 , __lowercase : Optional[Any]=4 , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_attention_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_choices def snake_case__ ( self : List[Any] ): """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_attention_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self : str ): """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def snake_case__ ( self : Any ): """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = True snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class UpperCAmelCase ( _a , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = True lowerCAmelCase_ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self : Any ): """simple docstring""" snake_case_ = FlaxRobertaModelTester(self ) @slow def snake_case__ ( self : Dict ): """simple docstring""" for model_class_name in self.all_model_classes: snake_case_ = model_class_name.from_pretrained("roberta-base" , from_pt=A_ ) snake_case_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(A_ )
code_codestyle: 187

style_context:
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = BioGptTokenizer _lowercase = False def snake_case_ ( self: Any ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) ) __UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file,'w' ) as fp: fp.write('\n'.join(A_ ) ) def snake_case_ ( self: Optional[int],A_: List[Any] ): '''simple docstring''' __UpperCamelCase = 'lower newer' __UpperCamelCase = 'lower newer' return input_text, output_text def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file ) __UpperCamelCase = 'lower' __UpperCamelCase = ['low', 'er</w>'] __UpperCamelCase = tokenizer.tokenize(A_ ) self.assertListEqual(A_,A_ ) __UpperCamelCase = tokens + ['<unk>'] __UpperCamelCase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ ) @slow def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) __UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ ) __UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ ) __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ ) __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
style_context_codestyle: 310
label: 0

code:
def lowerCamelCase__ ( a ) -> list: def merge(a , a ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(_lowercase ) <= 1: return collection _A: Optional[Any] = len(_lowercase ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ : Dict = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ : Tuple = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
code_codestyle: 121

style_context:
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. ''' @add_start_docstrings(_a ) class __lowerCamelCase (_a ): _lowercase = """rag""" _lowercase = True def __init__( self: Tuple,A_: Any=None,A_: Any=True,A_: List[Any]=None,A_: Optional[int]=None,A_: List[Any]=None,A_: str=None,A_: Union[str, Any]=None,A_: List[Any]=" / ",A_: Union[str, Any]=" // ",A_: List[Any]=5,A_: Optional[int]=300,A_: Tuple=768,A_: Tuple=8,A_: Optional[Any]="wiki_dpr",A_: int="train",A_: Union[str, Any]="compressed",A_: Optional[int]=None,A_: List[Any]=None,A_: List[str]=False,A_: List[str]=False,A_: str=0.0,A_: List[Any]=True,A_: Tuple=False,A_: int=False,A_: Dict=False,A_: Tuple=True,A_: int=None,**A_: Optional[int],): '''simple docstring''' super().__init__( bos_token_id=A_,pad_token_id=A_,eos_token_id=A_,decoder_start_token_id=A_,forced_eos_token_id=A_,is_encoder_decoder=A_,prefix=A_,vocab_size=A_,**A_,) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" __UpperCamelCase = kwargs.pop('question_encoder' ) __UpperCamelCase = question_encoder_config.pop('model_type' ) __UpperCamelCase = kwargs.pop('generator' ) __UpperCamelCase = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig __UpperCamelCase = AutoConfig.for_model(A_,**A_ ) __UpperCamelCase = AutoConfig.for_model(A_,**A_ ) __UpperCamelCase = reduce_loss __UpperCamelCase = label_smoothing __UpperCamelCase = exclude_bos_score __UpperCamelCase = do_marginalize __UpperCamelCase = title_sep __UpperCamelCase = doc_sep __UpperCamelCase = n_docs __UpperCamelCase = max_combined_length __UpperCamelCase = dataset __UpperCamelCase = dataset_split __UpperCamelCase = index_name __UpperCamelCase = retrieval_vector_size __UpperCamelCase = retrieval_batch_size __UpperCamelCase = passages_path __UpperCamelCase = index_path __UpperCamelCase = use_dummy_dataset __UpperCamelCase = output_retrieved __UpperCamelCase = do_deduplication __UpperCamelCase = use_cache if self.forced_eos_token_id is None: __UpperCamelCase = getattr(self.generator,'forced_eos_token_id',A_ ) @classmethod def snake_case_ ( cls: Any,A_: PretrainedConfig,A_: PretrainedConfig,**A_: int ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict(),generator=generator_config.to_dict(),**A_ ) def snake_case_ ( self: Tuple ): '''simple docstring''' __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.question_encoder.to_dict() __UpperCamelCase = self.generator.to_dict() __UpperCamelCase = self.__class__.model_type return output
style_context_codestyle: 310
label: 0

code:
"""simple docstring""" from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class snake_case ( _a ): SCREAMING_SNAKE_CASE_ : List[str] = ["""image_processor"""] SCREAMING_SNAKE_CASE_ : Dict = """SamImageProcessor""" def __init__( self : int , UpperCamelCase__ : str)-> Optional[int]: '''simple docstring''' super().__init__(A_) __lowerCAmelCase: List[Any] = self.image_processor __lowerCAmelCase: Optional[Any] = -1_0 __lowerCAmelCase: Union[str, Any] = self.image_processor.size["longest_edge"] def __call__( self : List[Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Tuple , )-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: Any = self.image_processor( A_ , return_tensors=A_ , **A_ , ) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase: Tuple = encoding_image_processor["original_sizes"] if hasattr(A_ , "numpy"): # Checks if Torch or TF tensor __lowerCAmelCase: Optional[Any] = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[Any] = self._check_and_preprocess_points( input_points=A_ , input_labels=A_ , input_boxes=A_ , ) __lowerCAmelCase: Tuple = self._normalize_and_convert( A_ , A_ , input_points=A_ , input_labels=A_ , input_boxes=A_ , return_tensors=A_ , ) return encoding_image_processor def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]="pt" , )-> List[str]: '''simple docstring''' if input_points is not None: if len(A_) != len(A_): __lowerCAmelCase: Optional[int] = [ self._normalize_coordinates(self.target_size , A_ , original_sizes[0]) for point in input_points ] else: __lowerCAmelCase: int = [ self._normalize_coordinates(self.target_size , A_ , A_) for point, original_size in zip(A_ , A_) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase: str = self._pad_points_and_labels(A_ , A_) __lowerCAmelCase: Any = np.array(A_) if input_labels is not None: __lowerCAmelCase: List[str] = np.array(A_) if input_boxes is not None: if len(A_) != len(A_): __lowerCAmelCase: Dict = [ self._normalize_coordinates(self.target_size , A_ , original_sizes[0] , is_bounding_box=A_) for box in input_boxes ] else: __lowerCAmelCase: int = [ self._normalize_coordinates(self.target_size , A_ , A_ , is_bounding_box=A_) for box, original_size in zip(A_ , A_) ] __lowerCAmelCase: Optional[int] = np.array(A_) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase: Dict = torch.from_numpy(A_) # boxes batch size of 1 by default __lowerCAmelCase: Optional[int] = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes elif return_tensors == "tf": __lowerCAmelCase: List[str] = tf.convert_to_tensor(A_) # boxes batch size of 1 by default __lowerCAmelCase: Any = tf.expand_dims(A_ , 1) if len(input_boxes.shape) != 3 else input_boxes 
encoding_image_processor.update({"input_boxes": input_boxes}) if input_points is not None: if return_tensors == "pt": __lowerCAmelCase: Union[str, Any] = torch.from_numpy(A_) # point batch size of 1 by default __lowerCAmelCase: List[Any] = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points elif return_tensors == "tf": __lowerCAmelCase: List[str] = tf.convert_to_tensor(A_) # point batch size of 1 by default __lowerCAmelCase: Dict = tf.expand_dims(A_ , 1) if len(input_points.shape) != 4 else input_points encoding_image_processor.update({"input_points": input_points}) if input_labels is not None: if return_tensors == "pt": __lowerCAmelCase: Optional[int] = torch.from_numpy(A_) # point batch size of 1 by default __lowerCAmelCase: List[Any] = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels elif return_tensors == "tf": __lowerCAmelCase: Dict = tf.convert_to_tensor(A_) # point batch size of 1 by default __lowerCAmelCase: Optional[Any] = tf.expand_dims(A_ , 1) if len(input_labels.shape) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels}) return encoding_image_processor def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: str = max([point.shape[0] for point in input_points]) __lowerCAmelCase: Tuple = [] for i, point in enumerate(A_): if point.shape[0] != expected_nb_points: __lowerCAmelCase: Any = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0) __lowerCAmelCase: List[str] = np.append(input_labels[i] , [self.point_pad_value]) processed_input_points.append(A_) __lowerCAmelCase: str = processed_input_points return input_points, input_labels def lowercase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=False)-> str: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Any = original_size __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.image_processor._get_preprocess_shape(A_ , longest_edge=A_) __lowerCAmelCase: List[str] = deepcopy(A_).astype(A_) if is_bounding_box: __lowerCAmelCase: Optional[int] = coords.reshape(-1 , 2 , 2) __lowerCAmelCase: Any = coords[..., 0] * (new_w / old_w) __lowerCAmelCase: Optional[int] = coords[..., 1] * (new_h / old_h) if is_bounding_box: __lowerCAmelCase: Optional[Any] = coords.reshape(-1 , 4) return coords def lowercase_ ( self : int , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , )-> Optional[int]: '''simple docstring''' if input_points is not None: if hasattr(A_ , "numpy"): # Checks for TF or Torch tensor __lowerCAmelCase: List[str] = input_points.numpy().tolist() if not isinstance(A_ , A_) or not isinstance(input_points[0] , A_): raise ValueError("Input points must be a list of list of floating points.") __lowerCAmelCase: Tuple = [np.array(A_) for input_point in input_points] else: __lowerCAmelCase: Dict = None if input_labels is not None: if hasattr(A_ , "numpy"): __lowerCAmelCase: Any = input_labels.numpy().tolist() if not isinstance(A_ , A_) or not isinstance(input_labels[0] , A_): raise ValueError("Input labels must be a list of list integers.") __lowerCAmelCase: int = [np.array(A_) for label in input_labels] else: __lowerCAmelCase: List[Any] = None if input_boxes is not None: if hasattr(A_ , "numpy"): __lowerCAmelCase: Optional[Any] = 
input_boxes.numpy().tolist() if ( not isinstance(A_ , A_) or not isinstance(input_boxes[0] , A_) or not isinstance(input_boxes[0][0] , A_) ): raise ValueError("Input boxes must be a list of list of list of floating points.") __lowerCAmelCase: List[Any] = [np.array(A_).astype(np.floataa) for box in input_boxes] else: __lowerCAmelCase: Dict = None return input_points, input_labels, input_boxes @property def lowercase_ ( self : Tuple)-> int: '''simple docstring''' __lowerCAmelCase: Dict = self.image_processor.model_input_names return list(dict.fromkeys(A_)) def lowercase_ ( self : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str])-> List[str]: '''simple docstring''' return self.image_processor.post_process_masks(*A_ , **A_)
code_codestyle: 217

style_context:
import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class __lowerCamelCase (_a ): _lowercase = """M-CLIP""" def __init__( self: int,A_: Any=1024,A_: Union[str, Any]=768,**A_: str ): '''simple docstring''' __UpperCamelCase = transformerDimSize __UpperCamelCase = imageDimSize super().__init__(**A_ ) class __lowerCamelCase (_a ): _lowercase = MCLIPConfig def __init__( self: int,A_: Optional[Any],*A_: List[str],**A_: Union[str, Any] ): '''simple docstring''' super().__init__(A_,*A_,**A_ ) __UpperCamelCase = XLMRobertaModel(A_ ) __UpperCamelCase = torch.nn.Linear( in_features=config.transformerDimensions,out_features=config.numDims ) def snake_case_ ( self: Dict,A_: int,A_: Optional[int] ): '''simple docstring''' __UpperCamelCase = self.transformer(input_ids=A_,attention_mask=A_ )[0] __UpperCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(A_ ), embs
style_context_codestyle: 310
label: 0

code:
'''simple docstring''' def __snake_case( _lowerCAmelCase ) -> bool: snake_case__ : Any = set() # To detect a back edge, keep track of vertices currently in the recursion stack snake_case__ : List[str] = set() return any( node not in visited and depth_first_search(_lowercase , _lowercase , _lowercase , _lowercase ) for node in graph ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> bool: visited.add(_lowercase ) rec_stk.add(_lowercase ) for node in graph[vertex]: if node not in visited: if depth_first_search(_lowercase , _lowercase , _lowercase , _lowercase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(_lowercase ) return False if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 35

style_context:
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __lowerCamelCase : _lowercase = XGLMConfig _lowercase = {} _lowercase = """gelu""" def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = ffn_dim __UpperCamelCase = activation_function __UpperCamelCase = activation_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = max_position_embeddings __UpperCamelCase = initializer_range __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = 2 __UpperCamelCase = 1 def snake_case_ ( self: Dict ): '''simple docstring''' return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = self.get_config() __UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 ) return ( config, input_ids, input_mask, head_mask, ) def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,) def snake_case_ ( self: int ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ), ( __UpperCamelCase ), ( __UpperCamelCase ), ( __UpperCamelCase ), ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class __lowerCamelCase (_a , _a , unittest.TestCase ): _lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _lowercase = (TFXGLMForCausalLM,) if is_tf_available() else () _lowercase = ( {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def snake_case_ ( self: List[Any] ): '''simple 
docstring''' __UpperCamelCase = TFXGLMModelTester(self ) __UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 ) def snake_case_ ( self: Any ): '''simple docstring''' self.config_tester.run_common_tests() @slow def snake_case_ ( self: Any ): '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFXGLMModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def snake_case_ ( self: Tuple ): '''simple docstring''' super().test_resize_token_embeddings() @require_tf class __lowerCamelCase (unittest.TestCase ): @slow def snake_case_ ( self: Optional[Any],A_: int=True ): '''simple docstring''' __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on __UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(),A_ ) @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' ) __UpperCamelCase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] ) __UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ ) __UpperCamelCase = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(A_,A_ ) @slow def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = 'left' # use different length sentences to test batching __UpperCamelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When', 'Hello, my dog is a little', ] __UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ ) __UpperCamelCase = inputs['input_ids'] __UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 ) __UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids __UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 ) __UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids __UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 ) __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ ) __UpperCamelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(A_,A_ ) self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
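The batching test above compares left-padded batched generation against unpadded generation. A minimal standalone sketch of the same pattern, using the same checkpoint as the test; the prompts are illustrative:

from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"  # prompts must end at the last position for generation
model = TFAutoModelForCausalLM.from_pretrained("facebook/xglm-564M")

batch = tokenizer(["A fairly long prompt about dogs", "Hi"], return_tensors="tf", padding=True)
outputs = model.generate(
    input_ids=batch.input_ids, attention_mask=batch.attention_mask, max_new_tokens=12
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))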
310
0
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Return the Mobius function of n: 0 if n is not square-free,
    otherwise (-1) ** (number of prime factors)."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
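A self-contained sketch of the same Mobius logic for readers without the local `maths` package on the path; the inline trial-division factorisation is a hypothetical stand-in for `maths.prime_factors`:

def _prime_factors(n: int) -> list[int]:
    # Simple trial division; fine for small n.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


def _mobius(n: int) -> int:
    factors = _prime_factors(n)
    if len(set(factors)) == len(factors):  # square-free: no repeated prime factor
        return -1 if len(factors) % 2 else 1
    return 0


assert [_mobius(n) for n in range(1, 7)] == [1, -1, -1, 0, -1, 1]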
348
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: __snake_case = json.load(f) @require_torch class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: int,A_: int ): '''simple docstring''' return FSMTTokenizer.from_pretrained(A_ ) def snake_case_ ( self: Dict,A_: int ): '''simple docstring''' __UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 2_6.0], ['ru-en', 2_2.0], ['en-de', 2_2.0], ['de-en', 2_9.0], ] ) @slow def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = F'''facebook/wmt19-{pair}''' __UpperCamelCase = self.get_tokenizer(A_ ) __UpperCamelCase = self.get_model(A_ ) __UpperCamelCase = bleu_data[pair]['src'] __UpperCamelCase = bleu_data[pair]['tgt'] __UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ ) __UpperCamelCase = model.generate( input_ids=batch.input_ids,num_beams=8,) __UpperCamelCase = tokenizer.batch_decode( A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ ) __UpperCamelCase = calculate_bleu(A_,A_ ) print(A_ ) self.assertGreaterEqual(scores['bleu'],A_ )
310
0
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the length (counted in vertices) of the longest path in a DAG,
    found with Kahn's topological sort and indegree counting."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from the vertices with no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
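For reference, a variant sketch that returns the whole distance table instead of printing only the maximum; `collections.deque` replaces the quadratic `list.pop(0)`:

from collections import deque


def longest_distances(graph: dict[int, list[int]]) -> list[int]:
    # Kahn's algorithm again, but collecting per-vertex longest path lengths.
    indegree = [0] * len(graph)
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = [1] * len(graph)  # path length counted in vertices
    queue = deque(v for v in graph if indegree[v] == 0)
    while queue:
        v = queue.popleft()
        for w in graph[v]:
            dist[w] = max(dist[w], dist[v] + 1)
            indegree[w] -= 1
            if indegree[w] == 0:
                queue.append(w)
    return dist


example = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
assert max(longest_distances(example)) == 5  # 0 -> 2 -> 5 -> 6 -> 7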
278
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
310
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class A (unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Union[str, Any]=18 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : Union[str, Any]=4_00 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : Any=True , ) -> Any: """simple docstring""" A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size_divisor A__ = do_rescale def a_ ( self : int ) -> List[str]: """simple docstring""" return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class A (_a , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = GLPNImageProcessor if is_vision_available() else None def a_ ( self : str ) -> int: """simple docstring""" A__ = GLPNImageProcessingTester(self ) @property def a_ ( self : Tuple ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self : Any ) -> List[str]: """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , """do_resize""" ) ) self.assertTrue(hasattr(A_ , """size_divisor""" ) ) self.assertTrue(hasattr(A_ , """resample""" ) ) self.assertTrue(hasattr(A_ , """do_rescale""" ) ) def a_ ( self : str ) -> Dict: """simple docstring""" pass def a_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def a_ ( self : Tuple ) -> int: """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , 
torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
274
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = MgpstrTokenizer _lowercase = False _lowercase = {} _lowercase = False def snake_case_ ( self: int ): '''simple docstring''' super().setUp() # fmt: off __UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on __UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) ) __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) def snake_case_ ( self: Dict,**A_: Tuple ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ ) def snake_case_ ( self: List[Any],A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = 'tester' __UpperCamelCase = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def snake_case_ ( self: str ): '''simple docstring''' pass def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCamelCase = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) __UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ ) self.assertEqual(len(A_ ),1 ) __UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ ) self.assertTrue(special_token not in decoded ) def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ ) __UpperCamelCase = tokenizer.tokenize(A_ ) __UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ ) __UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ ) self.assertListEqual(A_,A_ ) __UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ ) self.assertNotEqual(len(A_ ),0 ) __UpperCamelCase = tokenizer.decode(A_ ) self.assertIsInstance(A_,A_ ) self.assertEqual(text_a.replace(' ','' ),A_ ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def snake_case_ ( self: int ): '''simple docstring''' pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def snake_case_ ( self: List[str] ): '''simple docstring''' pass
310
0
import os
import sys
from contextlib import contextmanager

# ctypes is standard library on every platform and is needed for the
# Structure subclass below; msvcrt is Windows only.
import ctypes

if os.name == "nt":
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11: stdout
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")  # ANSI escape: hide cursor
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")  # ANSI escape: show cursor
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and always restores it."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
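A short usage sketch of the `hide` context manager above; the `finally` clause guarantees the cursor is restored even if the body raises:

import time

with hide():  # cursor invisible for the duration of the block
    for step in range(3):
        print(f"working... step {step + 1}")
        time.sleep(0.1)
# cursor visible again here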
168
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , ) class __lowerCamelCase (_a ): _lowercase = RobertaConfig _lowercase = """roberta""" def __init__( self: Union[str, Any],A_: List[str] ): '''simple docstring''' super().__init__(A_ ) __UpperCamelCase = RobertaEmbeddings(A_ ) self.init_weights() @add_start_docstrings( """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. """ , _a , ) class __lowerCamelCase (_a ): _lowercase = RobertaConfig _lowercase = """roberta""" def __init__( self: Any,A_: int ): '''simple docstring''' super().__init__(A_ ) __UpperCamelCase = config.num_labels __UpperCamelCase = config.num_hidden_layers __UpperCamelCase = DeeRobertaModel(A_ ) __UpperCamelCase = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels ) @add_start_docstrings_to_model_forward(A_ ) def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,): '''simple docstring''' __UpperCamelCase = self.num_layers try: __UpperCamelCase = self.roberta( A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,) __UpperCamelCase = outputs[1] __UpperCamelCase = self.dropout(A_ ) __UpperCamelCase = self.classifier(A_ ) __UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase = e.message __UpperCamelCase = e.exit_layer __UpperCamelCase = outputs[0] if not self.training: __UpperCamelCase = entropy(A_ ) __UpperCamelCase = [] __UpperCamelCase = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase = MSELoss() __UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) ) else: __UpperCamelCase = CrossEntropyLoss() __UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) ) # work with highway exits __UpperCamelCase = [] for highway_exit in outputs[-1]: __UpperCamelCase = highway_exit[0] if not self.training: highway_logits_all.append(A_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase = MSELoss() __UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) ) else: __UpperCamelCase = CrossEntropyLoss() __UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) ) highway_losses.append(A_ ) if train_highway: __UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase = (loss,) + outputs if not self.training: __UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), 
(attentions), entropy
310
0
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    """Breadth-first search over positive-capacity edges; records the search tree
    in `parent` and reports whether the sink `t` is reachable from the source `s`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    """Run Ford-Fulkerson max flow and return the edges whose residual
    capacity drops to zero (the saturated edges)."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
97
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __lowerCamelCase : @staticmethod def snake_case_ ( *A_: Optional[Any],**A_: Tuple ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __lowerCamelCase (unittest.TestCase ): _lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 ) self.assertGreater(len(A_ ),0 ) for detected_object in outputs: self.assertEqual( A_,{ 'score': ANY(A_ ), 'label': ANY(A_ ), 'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )}, },) import datasets __UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' ) __UpperCamelCase = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] __UpperCamelCase = object_detector(A_,threshold=0.0 ) self.assertEqual(len(A_ ),len(A_ ) ) for outputs in batch_outputs: self.assertGreater(len(A_ ),0 ) for detected_object in outputs: self.assertEqual( A_,{ 'score': ANY(A_ ), 'label': ANY(A_ ), 'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )}, },) @require_tf @unittest.skip('Object detection not implemented in TF' ) def snake_case_ ( self: str ): '''simple docstring''' pass @require_torch def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3' __UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ ) __UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ],threshold=0.0,) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], ],) 
@require_torch @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ ) __UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ],) @require_torch @slow def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = pipeline('object-detection',model=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 
'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ],) @require_torch @slow def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = 0.9_9_8_5 __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = pipeline('object-detection',model=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) @require_torch @require_pytesseract @slow def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd' __UpperCamelCase = 0.9_9_9_3 __UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ ) __UpperCamelCase = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, ],)
310
0
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """Approximate the surface distance in meters between two points on the
    WGS-84 ellipsoid, using Lambert's formula over the haversine central angle."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
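A quick usage sketch with approximate coordinates; it assumes `haversine_distance` resolves as in the relative import above:

# San Francisco and New York City, approximate (latitude, longitude) in degrees.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)

meters = lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK)
print(f"{meters / 1000:.0f} km")  # on the order of 4,100 km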
22
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class __lowerCamelCase (_a ): _lowercase = """xlm-roberta""" def __init__( self: Union[str, Any],A_: Union[str, Any]=3_0522,A_: Dict=768,A_: Union[str, Any]=12,A_: Any=12,A_: str=3072,A_: Union[str, Any]="gelu",A_: str=0.1,A_: Optional[int]=0.1,A_: List[Any]=512,A_: Optional[Any]=2,A_: Dict=0.0_2,A_: List[Any]=1E-12,A_: Optional[int]=1,A_: str=0,A_: str=2,A_: Optional[Any]="absolute",A_: Union[str, Any]=True,A_: int=None,**A_: Optional[Any],): '''simple docstring''' super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = classifier_dropout class __lowerCamelCase (_a ): @property def snake_case_ ( self: Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
310
0
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: eight digits plus a control letter,
    where the letter is LOOKUP_LETTERS[number % 23]."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
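A few sanity checks for the validator above; 12345678 % 23 == 14, and index 14 of the lookup table is "Z":

assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")  # a single hyphen is tolerated
assert not is_spain_national_id("12345678A")  # wrong control letter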
10
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __snake_case = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __lowerCamelCase (_a ): _lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} ) _lowercase = field( default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} ) _lowercase = field( default=_a , metadata={ """help""": ( """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `max_length` value of the model configuration.""" ) } , ) _lowercase = field( default=_a , metadata={ """help""": ( """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `num_beams` value of the model configuration.""" ) } , ) _lowercase = field( default=_a , metadata={ """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.""" } , ) def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(A_,A_ ): __UpperCamelCase = v.to_dict() return d
310
0
import inspect
from typing import List, Optional, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: DDIM sampling in the latent
    space of a VQ-VAE, followed by decoding to pixel space."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
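A usage sketch of the pipeline above. "CompVis/ldm-celebahq-256" is, to the best of my knowledge, a public checkpoint compatible with this vqvae + unet + DDIM layout; treat the id as illustrative:

import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.Generator().manual_seed(0)  # reproducible initial latents
image = pipe(batch_size=1, num_inference_steps=50, generator=generator).images[0]
image.save("ldm_sample.png")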
187
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap a model from its distributed containers (DDP, DataParallel,
    DeepSpeed, torch.compile wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes
    have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, using `xm.save` on TPU and writing only from
    the main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables for the duration of the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function, class, or object."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge the nested dictionary `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
310
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( _a , unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Optional[Any] = GPTaTokenizer __UpperCamelCase : List[Any] = GPTaTokenizerFast __UpperCamelCase : Any = True __UpperCamelCase : Tuple = {'''add_prefix_space''': True} __UpperCamelCase : int = False def __magic_name__ ( self : List[Any] ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _A: List[Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _A: str = dict(zip(A_ , range(len(A_ ) ) ) ) _A: Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _A: List[Any] = {'''unk_token''': '''<unk>'''} _A: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A_ ) ) def __magic_name__ ( self : List[Any] , **lowerCAmelCase_ : List[str] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ ) def __magic_name__ ( self : Any , **lowerCAmelCase_ : List[str] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] ): """simple docstring""" _A: Dict = '''lower newer''' _A: Optional[int] = '''lower newer''' return input_text, output_text def __magic_name__ ( self : Tuple ): """simple docstring""" _A: Tuple = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _A: Union[str, Any] = '''lower newer''' _A: str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _A: Union[str, Any] = tokenizer.tokenize(A_ , add_prefix_space=A_ ) self.assertListEqual(A_ , A_ ) _A: Optional[Any] = tokens + [tokenizer.unk_token] _A: Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def __magic_name__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return _A: List[Any] = self.get_tokenizer() _A: List[Any] = self.get_rust_tokenizer(add_prefix_space=A_ ) _A: Optional[Any] = '''lower newer''' # Testing tokenization _A: str = tokenizer.tokenize(A_ , add_prefix_space=A_ ) _A: Optional[int] = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids without special tokens _A: List[str] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ ) _A: Optional[Any] = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) # Testing conversion to ids with special tokens _A: Any = self.get_rust_tokenizer(add_prefix_space=A_ ) _A: int = tokenizer.encode(A_ , add_prefix_space=A_ ) 
_A: Optional[Any] = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # Testing the unknown token _A: Tuple = tokens + [rust_tokenizer.unk_token] _A: Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def __magic_name__ ( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ): """simple docstring""" pass def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Tuple=1_5 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _A: Optional[Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input _A: List[str] = '''This is a simple input''' _A: Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] _A: Tuple = ('''This is a simple input''', '''This is a pair''') _A: List[str] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='''max_length''' ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='''max_length''' ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='''max_length''' , ) def __magic_name__ ( self : Any ): """simple docstring""" _A: List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input _A: int = '''This is a simple input''' _A: List[Any] = ['''This is a simple input looooooooong''', '''This is a simple input'''] _A: str = ('''This is a simple input''', '''This is a pair''') _A: Dict = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _A: Dict = tokenizer.pad_token_id _A: List[str] = tokenizer(A_ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' ) _A: Optional[int] = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='''np''' ) _A: str = tokenizer(*A_ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' ) _A: Optional[Any] = tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair 
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def __magic_name__ ( self : List[str] ): """simple docstring""" _A: Dict = '''$$$''' _A: Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ ) _A: Any = '''This is a simple input''' _A: str = ['''This is a simple input 1''', '''This is a simple input 2'''] _A: Optional[Any] = tokenizer.bos_token_id _A: Tuple = tokenizer(A_ ) _A: str = tokenizer(A_ ) self.assertEqual(out_s.input_ids[0] , A_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _A: Optional[int] = tokenizer.decode(out_s.input_ids ) _A: Dict = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __magic_name__ ( self : List[str] ): """simple docstring""" pass def __magic_name__ ( self : Tuple ): """simple docstring""" _A: Dict = [self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )] for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _A: List[Any] = '''Encode this.''' _A: List[str] = '''This one too please.''' _A: Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ ) encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ ) _A: Tuple = tokenizer.encode_plus( A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , ) _A: Dict = encoded_sequence_dict['''input_ids'''] _A: Dict = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(A_ ) , len(A_ ) ) _A: Dict = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ ) ] _A: List[Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(A_ , A_ ) @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __magic_name__ ( self : Optional[int] ): """simple docstring""" _A: Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A_ ) _A: List[str] = '''A photo of a cat''' _A: Union[str, Any] = tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''test_opt''' ) _A: str = AutoTokenizer.from_pretrained('''./test_opt''' ) _A: Any = tokenizer.encode( A_ , ) self.assertEqual(A_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def __magic_name__ ( self : Any ): """simple docstring""" _A: str = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=A_ ) _A: Optional[int] = '''A photo of a cat''' _A: Tuple = tokenizer.encode( A_ , ) # Same as above self.assertEqual(A_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def __magic_name__ ( self : List[Any] ): """simple docstring""" _A: str = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A_ ) _A: List[str] = '''bos''' _A: Union[str, Any] = tokenizer.get_vocab()['''bos'''] _A: Dict = '''A photo of a cat''' _A: List[Any] = tokenizer.encode( A_ , ) # We changed the bos token self.assertEqual(A_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''./tok''' ) _A: Any = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) _A: Union[str, Any] = 
tokenizer.encode( A_ , ) self.assertEqual(A_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
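The padding tests above rely on GPT-2 shipping without a pad token; a minimal sketch of the pattern they exercise:

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default

batch = tokenizer(
    ["This is a simple input looooooooong", "This is a simple input"],
    padding="max_length",
    max_length=30,
    return_tensors="np",
)
assert batch["input_ids"].shape[-1] == 30  # both rows padded to max_length
assert 0 in batch["attention_mask"][1]  # the shorter row carries padding positions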
121
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __snake_case = logging.getLogger(__name__) def _A ( _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" return (preds == labels).mean() @dataclass class __lowerCamelCase : _lowercase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _lowercase = field( default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __lowerCamelCase : _lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) _lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} ) _lowercase = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _lowercase = field( default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _A ( ) -> str: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _lowercase ) # Set seed set_seed(training_args.seed ) try: __UpperCamelCase = processors[data_args.task_name]() __UpperCamelCase = processor.get_labels() __UpperCamelCase = len(_lowercase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __UpperCamelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , ) # Get datasets __UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_lowercase ) -> Dict: __UpperCamelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_lowercase , p.label_ids )} # Data collator __UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __UpperCamelCase = Trainer( model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __UpperCamelCase = trainer.evaluate() __UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(_lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _lowercase , _lowercase ) writer.write('%s = %s\n' % (key, value) ) results.update(_lowercase ) return results def _A ( _lowercase ) -> List[Any]: """simple docstring""" main() if __name__ == "__main__": main()
310
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class snake_case : @staticmethod def lowercase_ ( *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple)-> List[str]: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class snake_case ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any])-> List[Any]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = ObjectDetectionPipeline(model=A_ , image_processor=A_) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowercase_ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Dict = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0) self.assertGreater(len(A_) , 0) for detected_object in outputs: self.assertEqual( A_ , { "score": ANY(A_), "label": ANY(A_), "box": {"xmin": ANY(A_), "ymin": ANY(A_), "xmax": ANY(A_), "ymax": ANY(A_)}, } , ) import datasets __lowerCAmelCase: Dict = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test") __lowerCAmelCase: str = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] __lowerCAmelCase: List[Any] = object_detector(A_ , threshold=0.0) self.assertEqual(len(A_) , len(A_)) for outputs in batch_outputs: self.assertGreater(len(A_) , 0) for detected_object in outputs: self.assertEqual( A_ , { "score": ANY(A_), "label": ANY(A_), "box": {"xmin": ANY(A_), "ymin": ANY(A_), "xmax": ANY(A_), "ymax": ANY(A_)}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF") def lowercase_ ( self : str)-> str: '''simple docstring''' pass @require_torch def lowercase_ ( self : List[Any])-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: Optional[int] = "hf-internal-testing/tiny-detr-mobilenetsv3" __lowerCAmelCase: Dict = AutoModelForObjectDetection.from_pretrained(A_) __lowerCAmelCase: Optional[int] = AutoFeatureExtractor.from_pretrained(A_) __lowerCAmelCase: Any = ObjectDetectionPipeline(model=A_ , feature_extractor=A_) __lowerCAmelCase: int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0) self.assertEqual( nested_simplify(A_ , decimals=4) , [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ] , ) __lowerCAmelCase: Union[str, Any] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(A_ , decimals=4) , [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 
3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], ] , ) @require_torch @slow def lowercase_ ( self : Optional[Any])-> int: '''simple docstring''' __lowerCAmelCase: Optional[int] = "facebook/detr-resnet-50" __lowerCAmelCase: Union[str, Any] = AutoModelForObjectDetection.from_pretrained(A_) __lowerCAmelCase: List[str] = AutoFeatureExtractor.from_pretrained(A_) __lowerCAmelCase: int = ObjectDetectionPipeline(model=A_ , feature_extractor=A_) __lowerCAmelCase: Dict = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(A_ , decimals=4) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) __lowerCAmelCase: List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ]) self.assertEqual( nested_simplify(A_ , decimals=4) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def lowercase_ ( self : str)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = "facebook/detr-resnet-50" __lowerCAmelCase: Dict = pipeline("object-detection" , model=A_) __lowerCAmelCase: Tuple = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(A_ , decimals=4) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) 
__lowerCAmelCase: Union[str, Any] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ]) self.assertEqual( nested_simplify(A_ , decimals=4) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def lowercase_ ( self : List[str])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = 0.9985 __lowerCAmelCase: Union[str, Any] = "facebook/detr-resnet-50" __lowerCAmelCase: str = pipeline("object-detection" , model=A_) __lowerCAmelCase: List[str] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=A_) self.assertEqual( nested_simplify(A_ , decimals=4) , [ {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def lowercase_ ( self : List[str])-> int: '''simple docstring''' __lowerCAmelCase: Dict = "Narsil/layoutlmv3-finetuned-funsd" __lowerCAmelCase: int = 0.9993 __lowerCAmelCase: str = pipeline("object-detection" , model=A_ , threshold=A_) __lowerCAmelCase: List[str] = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png") self.assertEqual( nested_simplify(A_ , decimals=4) , [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, ] , )
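# --- Illustrative usage (added; not part of the original test file) ---
# A minimal, hedged sketch of what these tests exercise: running the high-level
# object-detection pipeline on a COCO image URL. The model id mirrors the slow
# tests above; the 0.9 threshold here is illustrative only.
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   for d in detections:
#       print(d["label"], round(d["score"], 4), d["box"])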
# =============================================================================
# Project Euler problem 22: names scores
# =============================================================================
import os


def solution() -> int:
    """Sum the alphabetical-value score of each name, weighted by its
    (1-indexed) position in the sorted list from p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
# =============================================================================
# Lazy import structure for the (deprecated) M-CTC-T model
# =============================================================================
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# =============================================================================
# Utilities for the RAG fine-tuning example (dataset, metrics, helpers)
# =============================================================================
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
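# --- Illustrative usage (added) ---
# A small, hedged sketch of `encode_line` and `trim_batch` with a plain BART
# tokenizer; the sentence and max_length are illustrative only.
#
#   from transformers import BartTokenizer
#
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   enc = encode_line(tok, "A short source line.", max_length=16, padding_side="right")
#   ids, mask = trim_batch(enc["input_ids"], tok.pad_token_id, attention_mask=enc["attention_mask"])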
# =============================================================================
# Unconditional image-generation pipeline with the SDE-VE scheduler (diffusers)
# =============================================================================
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
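# --- Illustrative usage (added) ---
# A hedged sketch of sampling with this pipeline. "google/ncsnpp-church-256" is
# assumed to be a compatible SDE-VE checkpoint on the Hub; adjust as needed.
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")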
# =============================================================================
# Lazy import structure for the ViT-MAE model
# =============================================================================
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# =============================================================================
# Pascal's triangle: printing, generation, and an optimized generator
# =============================================================================
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list, current_row_idx: int) -> list:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list,
    current_row: list,
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """Build each row from the previous one, computing only the distinct
    (left-half) elements and mirroring them, since the rows are symmetric."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
# =============================================================================
# Scrape job listings from Indeed for a given location
# =============================================================================
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
# =============================================================================
# Tests for accelerate's batch-size finder and memory-release utilities
# =============================================================================
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
# =============================================================================
# Merge sort
# =============================================================================
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
# =============================================================================
# Zero-shot audio classification pipeline (transformers)
# =============================================================================
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, logging
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
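# --- Illustrative usage (added) ---
# A hedged sketch of this pipeline through the high-level factory.
# "laion/clap-htsat-unfused" is assumed to be a compatible CLAP checkpoint;
# the file name and labels are illustrative only.
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   result = classifier("sample.wav", candidate_labels=["dog barking", "vacuum cleaner"])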
# =============================================================================
# Tests for accelerate's kwargs handlers (GradScalerKwargs, DDP kwargs)
# =============================================================================
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If nothing is changed from the defaults, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
# =============================================================================
# ElGamal key generation
# =============================================================================
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
# =============================================================================
# OwlViT processor: wraps an image processor and a CLIP tokenizer
# =============================================================================
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
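# --- Illustrative usage (added) ---
# A hedged sketch of pairing this processor with the OwlViT detection model for
# text-conditioned detection. The checkpoint id is assumed; queries and the
# image file are illustrative only.
#
#   from PIL import Image
#   from transformers import OwlViTForObjectDetection, OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(
#       text=[["a photo of a cat", "a photo of a dog"]],
#       images=Image.open("cats.png"),
#       return_tensors="pt",
#   )
#   outputs = model(**inputs)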
# =============================================================================
# Inductive reactance: solve for the one missing electrical property
# =============================================================================
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Given exactly one zero value among inductance, frequency and reactance,
    compute it from the other two and return it as a name/value pair."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# =============================================================================
# Proth numbers
# =============================================================================
import math


def proth(number: int) -> int:
    """Return the nth (1-indexed) Proth number."""
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")

    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
# =============================================================================
# Translation feature types (datasets)
# =============================================================================
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
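# --- Illustrative usage (added) ---
# A hedged sketch of how `encode_example` splits multi-valued translations and
# sorts by language code; the example values are illustrative only.
#
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   # -> {"language": ("de", "en", "fr", "fr"),
#   #     "translation": ("die katze", "the cat", "la chatte", "le chat")}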
# =============================================================================
# FSNER: few-shot named entity recognition model
# =============================================================================
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as a potential entity start and end, using
        the start/end token positions in the support examples."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
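# --- Illustrative usage (added) ---
# A hedged sketch of the expected call shape. The forward pass assumes
# tokenized query/support batches prepared elsewhere (dicts of tensors with
# "input_ids" and "attention_mask", plus "sizes", "start_token_id" and
# "end_token_id" in W_supports); no concrete tokenizer call is shown here.
#
#   model = FSNERModel()
#   # p_starts, p_ends = model(W_query, W_supports)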
# =============================================================================
# DeepSpeed integration tests for the wav2vec2 research example
# =============================================================================
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
# =============================================================================
# Tokenization tests for BioGPT
# =============================================================================
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
# =============================================================================
# LRU cache (page replacement, least recently used)
# =============================================================================
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store and key set; the cache is sized to n
        (or to sys.maxsize when n is falsy)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to x, evicting the least recently used key
        when the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all the elements in the store, most recent first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
'''


@add_start_docstrings(__snake_case)  # `__snake_case` holds the RAG config docstring defined above
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
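For orientation, a short sketch of how this composite config is usually assembled from its two sub-configs (the checkpoint names are illustrative, not taken from this file):

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)
print(rag_config.generator.model_type)  # "bart"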
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class snake_case ( _a ): SCREAMING_SNAKE_CASE_ : Any = """roberta""" def __init__( self : Dict , UpperCamelCase__ : Optional[Any]=5_0_2_6_5 , UpperCamelCase__ : Union[str, Any]=7_6_8 , UpperCamelCase__ : List[Any]=1_2 , UpperCamelCase__ : int=1_2 , UpperCamelCase__ : Optional[int]=3_0_7_2 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple=5_1_2 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Dict=1e-12 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Tuple="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Optional[int] , )-> str: '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_) __lowerCAmelCase: Dict = vocab_size __lowerCAmelCase: Any = hidden_size __lowerCAmelCase: str = num_hidden_layers __lowerCAmelCase: Optional[int] = num_attention_heads __lowerCAmelCase: Optional[int] = hidden_act __lowerCAmelCase: Tuple = intermediate_size __lowerCAmelCase: List[str] = hidden_dropout_prob __lowerCAmelCase: List[Any] = attention_probs_dropout_prob __lowerCAmelCase: Optional[int] = max_position_embeddings __lowerCAmelCase: Any = type_vocab_size __lowerCAmelCase: str = initializer_range __lowerCAmelCase: Dict = layer_norm_eps __lowerCAmelCase: Optional[Any] = position_embedding_type __lowerCAmelCase: List[str] = use_cache __lowerCAmelCase: Optional[int] = classifier_dropout class snake_case ( _a ): @property def lowercase_ ( self : Any)-> Union[str, Any]: '''simple docstring''' if self.task == "multiple-choice": __lowerCAmelCase: Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase: Dict = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ])
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Last hidden states of the text transformer: (batch, seq_len, hidden).
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Attention-mask-weighted mean pooling over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        # Project into the image-encoder embedding space; also return the raw states.
        return self.LinearTransformation(pooled), embs
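A minimal call-shape sketch for the text tower above; the tokenizer checkpoint and the dimension overrides are illustrative assumptions (the default XLM-R config has hidden size 768, so `transformerDimSize` is set to match, and `vocab_size` is raised to cover the tokenizer's ids):

import torch
from transformers import AutoTokenizer

config = MCLIPConfig(transformerDimSize=768, imageDimSize=640, vocab_size=250002)  # assumed dims
model = MultilingualCLIP(config)  # randomly initialized, just to show shapes

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")  # illustrative tokenizer
batch = tokenizer(["a photo of a dog", "ein Foto eines Hundes"], padding=True, return_tensors="pt")

with torch.no_grad():
    projected, token_states = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # torch.Size([2, 640])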
'''simple docstring''' import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging __a = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"] __a = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = " Hello world! cécé herlolip" __a = [ ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"), ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"), ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"), ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"), ] def __snake_case( _lowerCAmelCase ) -> Dict: snake_case__ : int = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: snake_case__ : Any = dct.pop(_lowercase ) snake_case__ : Any = val def __snake_case( _lowerCAmelCase ) -> Tuple: snake_case__ : Optional[int] = torch.load(_lowercase , map_location="""cpu""" ) snake_case__ : Optional[Any] = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval() hub_interface.model.load_state_dict(sd["""model"""] ) return hub_interface def __snake_case( _lowerCAmelCase ) -> Dict: snake_case__ , snake_case__ : List[str] = emb.weight.shape snake_case__ : Optional[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) snake_case__ : List[str] = emb.weight.data return lin_layer @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> Tuple: if not os.path.exists(_lowercase ): snake_case__ : Any = torch.hub.load("""pytorch/fairseq""" , _lowercase ).eval() else: snake_case__ : int = load_xsum_checkpoint(_lowercase ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: snake_case__ : Union[str, Any] = checkpoint_path.replace(""".""" , """-""" ) snake_case__ : int = BartConfig.from_pretrained(_lowercase ) snake_case__ : Dict = bart.encode(_lowercase ).unsqueeze(0 ) snake_case__ : Dict = BartTokenizer.from_pretrained(_lowercase ).encode(_lowercase , return_tensors="""pt""" ).unsqueeze(0 ) if not torch.eq(_lowercase , _lowercase ).all(): raise ValueError( f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": snake_case__ : Optional[Any] = bart.state_dict() remove_ignore_keys_(_lowercase ) snake_case__ : Any = state_dict["""model.decoder.embed_tokens.weight"""] for src, dest in mnli_rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) snake_case__ : Union[str, Any] = BartForSequenceClassification(_lowercase ).eval() model.load_state_dict(_lowercase ) snake_case__ : List[str] = bart.predict("""mnli""" , _lowercase , return_logits=_lowercase ) snake_case__ : Any = model(_lowercase )[0] # logits else: # no classification heads to worry about snake_case__ : int = bart.model.state_dict() remove_ignore_keys_(_lowercase ) snake_case__ : 
List[Any] = state_dict["""decoder.embed_tokens.weight"""] snake_case__ : Union[str, Any] = bart.extract_features(_lowercase ) if hf_checkpoint_name == "facebook/bart-large": snake_case__ : Optional[int] = BartModel(_lowercase ).eval() model.load_state_dict(_lowercase ) snake_case__ : Optional[Any] = model(_lowercase ).model[0] else: snake_case__ : List[Any] = BartForConditionalGeneration(_lowercase ).eval() # an existing summarization ckpt model.model.load_state_dict(_lowercase ) if hasattr(_lowercase , """lm_head""" ): snake_case__ : int = make_linear_from_emb(model.model.shared ) snake_case__ : Any = model.model(_lowercase )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) __a = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __lowerCamelCase : _lowercase = XGLMConfig _lowercase = {} _lowercase = """gelu""" def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = ffn_dim __UpperCamelCase = activation_function __UpperCamelCase = activation_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = max_position_embeddings __UpperCamelCase = initializer_range __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = 2 __UpperCamelCase = 1 def snake_case_ ( self: Dict ): '''simple docstring''' return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = self.get_config() __UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 ) return ( config, input_ids, input_mask, head_mask, ) def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,) def snake_case_ ( self: int ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ), ( __UpperCamelCase ), ( __UpperCamelCase ), ( __UpperCamelCase ), ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class __lowerCamelCase (_a , _a , unittest.TestCase ): _lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _lowercase = (TFXGLMForCausalLM,) if is_tf_available() else () _lowercase = ( {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def snake_case_ ( self: List[Any] ): '''simple 
docstring''' __UpperCamelCase = TFXGLMModelTester(self ) __UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 ) def snake_case_ ( self: Any ): '''simple docstring''' self.config_tester.run_common_tests() @slow def snake_case_ ( self: Any ): '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFXGLMModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def snake_case_ ( self: Tuple ): '''simple docstring''' super().test_resize_token_embeddings() @require_tf class __lowerCamelCase (unittest.TestCase ): @slow def snake_case_ ( self: Optional[Any],A_: int=True ): '''simple docstring''' __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]],dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on __UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(),A_ ) @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' ) __UpperCamelCase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] ) __UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ ) __UpperCamelCase = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(A_,A_ ) @slow def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __UpperCamelCase = 'left' # use different length sentences to test batching __UpperCamelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When', 'Hello, my dog is a little', ] __UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ ) __UpperCamelCase = inputs['input_ids'] __UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 ) __UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids __UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 ) __UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids __UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 ) __UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ ) __UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ ) __UpperCamelCase = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(A_,A_ ) self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __snake_case = '''hf-internal-testing/tiny-random-bert''' __snake_case = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') __snake_case = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class __snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : int =cached_file(A_ , A_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(A_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) ) with open(os.path.join(A_ , '''refs''' , '''main''' ) ) as f: UpperCAmelCase : Tuple =f.read() self.assertEqual(A_ , os.path.join(A_ , '''snapshots''' , A_ , A_ ) ) self.assertTrue(os.path.isfile(A_ ) ) # File is cached at the same place the second time. UpperCAmelCase : int =cached_file(A_ , A_ ) self.assertEqual(A_ , A_ ) # Using a specific revision to test the full commit hash. UpperCAmelCase : List[str] =cached_file(A_ , A_ , revision='''9b8c223''' ) self.assertEqual(A_ , os.path.join(A_ , '''snapshots''' , A_ , A_ ) ) def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaisesRegex(A_ , '''is not a valid model identifier''' ): UpperCAmelCase : Any =cached_file('''tiny-random-bert''' , A_ ) with self.assertRaisesRegex(A_ , '''is not a valid git identifier''' ): UpperCAmelCase : List[Any] =cached_file(A_ , A_ , revision='''aaaa''' ) with self.assertRaisesRegex(A_ , '''does not appear to have a file named''' ): UpperCAmelCase : str =cached_file(A_ , '''conf''' ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' with self.assertRaisesRegex(A_ , '''does not appear to have a file named''' ): UpperCAmelCase : Dict =cached_file(A_ , '''conf''' ) with open(os.path.join(A_ , '''refs''' , '''main''' ) ) as f: UpperCAmelCase : Union[str, Any] =f.read() self.assertTrue(os.path.isfile(os.path.join(A_ , '''.no_exist''' , A_ , '''conf''' ) ) ) UpperCAmelCase : Union[str, Any] =cached_file(A_ , '''conf''' , _raise_exceptions_for_missing_entries=A_ ) self.assertIsNone(A_ ) UpperCAmelCase : Optional[Any] =cached_file(A_ , '''conf''' , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ ) self.assertIsNone(A_ ) UpperCAmelCase : Optional[int] =mock.Mock() UpperCAmelCase : List[Any] =500 UpperCAmelCase : str ={} UpperCAmelCase : str =HTTPError UpperCAmelCase : List[str] ={} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=A_ ) as mock_head: UpperCAmelCase : List[str] =cached_file(A_ , '''conf''' , _raise_exceptions_for_connection_errors=A_ ) self.assertIsNone(A_ ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(A_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , A_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(A_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , A_ , revision='''ahaha''' ) UpperCAmelCase : List[Any] =get_file_from_repo('''bert-base-cased''' , A_ ) # The name is the cached name which is not very easy to test, so instead we load the content. UpperCAmelCase : Any =json.loads(open(A_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase : List[str] =Path(A_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(A_ , '''a.txt''' ) , str(A_ ) ) self.assertIsNone(get_file_from_repo(A_ , '''b.txt''' ) )
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: __snake_case = json.load(f) @require_torch class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: int,A_: int ): '''simple docstring''' return FSMTTokenizer.from_pretrained(A_ ) def snake_case_ ( self: Dict,A_: int ): '''simple docstring''' __UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 2_6.0], ['ru-en', 2_2.0], ['en-de', 2_2.0], ['de-en', 2_9.0], ] ) @slow def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = F'''facebook/wmt19-{pair}''' __UpperCamelCase = self.get_tokenizer(A_ ) __UpperCamelCase = self.get_model(A_ ) __UpperCamelCase = bleu_data[pair]['src'] __UpperCamelCase = bleu_data[pair]['tgt'] __UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ ) __UpperCamelCase = model.generate( input_ids=batch.input_ids,num_beams=8,) __UpperCamelCase = tokenizer.batch_decode( A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ ) __UpperCamelCase = calculate_bleu(A_,A_ ) print(A_ ) self.assertGreaterEqual(scores['bleu'],A_ )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # For backward compatibility: fall back to plain multi-head attention.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
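To make the validation rules concrete, a small sketch using the class defined above:

# Valid: linear RoPE position interpolation by 4x.
cfg = LlamaConfig(rope_scaling={"type": "linear", "factor": 4.0})

# Each of the following would raise ValueError in _rope_scaling_validation:
#   LlamaConfig(rope_scaling={"type": "linear"})                  # missing factor (dict must have 2 fields)
#   LlamaConfig(rope_scaling={"type": "ntk", "factor": 4.0})      # unsupported type
#   LlamaConfig(rope_scaling={"type": "dynamic", "factor": 1.0})  # factor must be > 1.0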
def hexagonal_numbers(length: int) -> list[int]:
    """
    Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    """
    # Check the type first, so that non-integer input fails with a clear message
    # instead of a TypeError from the `<=` comparison.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
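A side note (a standard identity, not something stated in the file): every hexagonal number is also a triangular number, h(n) = T(2n-1), which gives a cheap cross-check:

triangular = lambda m: m * (m + 1) // 2
assert all(n * (2 * n - 1) == triangular(2 * n - 1) for n in range(1, 100))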
def twos_complement(number: int) -> str:
    """
    Return the two's complement of a non-positive integer as a binary string.

    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(0)
    '0b0'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
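To trace the arithmetic (my own worked example): for number = -5, bin(-5) is '-0b101', so the magnitude needs 3 bits; abs(-5) - (1 << 3) = -3, and bin(-3)[3:] is '11'; prefixing the sign bit and zero-padding yields '1011'. Hence:

assert twos_complement(-5) == "0b1011"   # 4-bit two's complement of -5
assert twos_complement(-1) == "0b11"     # sign bit plus one magnitude bit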
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = MgpstrTokenizer _lowercase = False _lowercase = {} _lowercase = False def snake_case_ ( self: int ): '''simple docstring''' super().setUp() # fmt: off __UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on __UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) ) __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) def snake_case_ ( self: Dict,**A_: Tuple ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ ) def snake_case_ ( self: List[Any],A_: Optional[Any] ): '''simple docstring''' __UpperCamelCase = 'tester' __UpperCamelCase = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def snake_case_ ( self: str ): '''simple docstring''' pass def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = self.get_tokenizers(do_lower_case=A_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCamelCase = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) __UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ ) self.assertEqual(len(A_ ),1 ) __UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ ) self.assertTrue(special_token not in decoded ) def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ ) __UpperCamelCase = tokenizer.tokenize(A_ ) __UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ ) __UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ ) self.assertListEqual(A_,A_ ) __UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ ) self.assertNotEqual(len(A_ ),0 ) __UpperCamelCase = tokenizer.decode(A_ ) self.assertIsInstance(A_,A_ ) self.assertEqual(text_a.replace(' ','' ),A_ ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def snake_case_ ( self: int ): '''simple docstring''' pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def snake_case_ ( self: List[str] ): '''simple docstring''' pass
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of the inclusive slice array[start : end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Whether some contiguous subarray sums to ``target_sum`` (hash-set trick)."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
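After the O(n) preprocessing in `__init__`, both queries are cheap; a short usage sketch (the values are mine):

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10        # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5         # 2 + 3
assert ps.contains_sum(7)            # subarray [3, 4]
assert not ps.contains_sum(100)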
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _a , ) class __lowerCamelCase (_a ): _lowercase = RobertaConfig _lowercase = """roberta""" def __init__( self: Union[str, Any],A_: List[str] ): '''simple docstring''' super().__init__(A_ ) __UpperCamelCase = RobertaEmbeddings(A_ ) self.init_weights() @add_start_docstrings( """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. """ , _a , ) class __lowerCamelCase (_a ): _lowercase = RobertaConfig _lowercase = """roberta""" def __init__( self: Any,A_: int ): '''simple docstring''' super().__init__(A_ ) __UpperCamelCase = config.num_labels __UpperCamelCase = config.num_hidden_layers __UpperCamelCase = DeeRobertaModel(A_ ) __UpperCamelCase = nn.Dropout(config.hidden_dropout_prob ) __UpperCamelCase = nn.Linear(config.hidden_size,self.config.num_labels ) @add_start_docstrings_to_model_forward(A_ ) def snake_case_ ( self: List[str],A_: int=None,A_: List[Any]=None,A_: List[str]=None,A_: List[str]=None,A_: Optional[int]=None,A_: List[str]=None,A_: Any=None,A_: List[Any]=-1,A_: List[Any]=False,): '''simple docstring''' __UpperCamelCase = self.num_layers try: __UpperCamelCase = self.roberta( A_,attention_mask=A_,token_type_ids=A_,position_ids=A_,head_mask=A_,inputs_embeds=A_,) __UpperCamelCase = outputs[1] __UpperCamelCase = self.dropout(A_ ) __UpperCamelCase = self.classifier(A_ ) __UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __UpperCamelCase = e.message __UpperCamelCase = e.exit_layer __UpperCamelCase = outputs[0] if not self.training: __UpperCamelCase = entropy(A_ ) __UpperCamelCase = [] __UpperCamelCase = [] if labels is not None: if self.num_labels == 1: # We are doing regression __UpperCamelCase = MSELoss() __UpperCamelCase = loss_fct(logits.view(-1 ),labels.view(-1 ) ) else: __UpperCamelCase = CrossEntropyLoss() __UpperCamelCase = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) ) # work with highway exits __UpperCamelCase = [] for highway_exit in outputs[-1]: __UpperCamelCase = highway_exit[0] if not self.training: highway_logits_all.append(A_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __UpperCamelCase = MSELoss() __UpperCamelCase = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) ) else: __UpperCamelCase = CrossEntropyLoss() __UpperCamelCase = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) ) highway_losses.append(A_ ) if train_highway: __UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __UpperCamelCase = (loss,) + outputs if not self.training: __UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __UpperCamelCase = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), 
(attentions), entropy
"""
Project Euler Problem 8: https://projecteuler.net/problem=8

Find the thirteen adjacent digits in the 1000-digit number below that have
the greatest product.
"""
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
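The reduce-over-strings trick recomputes each 13-digit product from scratch and round-trips every step through `str`. For contrast, a plainer sliding-window sketch (my own illustration, not part of the Euler file) that multiplies the digits directly:

def solution_window(n: str = N, width: int = 13) -> int:
    best = 0
    for i in range(len(n) - width + 1):
        product = 1
        for digit in n[i : i + width]:
            product *= int(digit)
        best = max(best, product)
    return best

assert solution_window() == solution()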
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __lowerCamelCase : @staticmethod def snake_case_ ( *A_: Optional[Any],**A_: Tuple ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __lowerCamelCase (unittest.TestCase ): _lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 ) self.assertGreater(len(A_ ),0 ) for detected_object in outputs: self.assertEqual( A_,{ 'score': ANY(A_ ), 'label': ANY(A_ ), 'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )}, },) import datasets __UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' ) __UpperCamelCase = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] __UpperCamelCase = object_detector(A_,threshold=0.0 ) self.assertEqual(len(A_ ),len(A_ ) ) for outputs in batch_outputs: self.assertGreater(len(A_ ),0 ) for detected_object in outputs: self.assertEqual( A_,{ 'score': ANY(A_ ), 'label': ANY(A_ ), 'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )}, },) @require_tf @unittest.skip('Object detection not implemented in TF' ) def snake_case_ ( self: str ): '''simple docstring''' pass @require_torch def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3' __UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ ) __UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=0.0 ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ],threshold=0.0,) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], ],) 
@require_torch @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = AutoModelForObjectDetection.from_pretrained(A_ ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained(A_ ) __UpperCamelCase = ObjectDetectionPipeline(model=A_,feature_extractor=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ],) @require_torch @slow def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = pipeline('object-detection',model=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) __UpperCamelCase = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 
'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ],) @require_torch @slow def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = 0.9_9_8_5 __UpperCamelCase = 'facebook/detr-resnet-50' __UpperCamelCase = pipeline('object-detection',model=A_ ) __UpperCamelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg',threshold=A_ ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ],) @require_torch @require_pytesseract @slow def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = 'Narsil/layoutlmv3-finetuned-funsd' __UpperCamelCase = 0.9_9_9_3 __UpperCamelCase = pipeline('object-detection',model=A_,threshold=A_ ) __UpperCamelCase = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(A_,decimals=4 ),[ {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, ],)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """
    Apply the rectified linear unit, max(0, x), elementwise.

    >>> relu([-1, 0, 5])
    array([0, 0, 5])
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize like TrainingArguments, but replace any nested GenerationConfig
        # with its dictionary form so the result stays JSON-serializable.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
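A brief usage sketch (the output directory and generation values are illustrative):

args = Seq2SeqTrainingArguments(
    output_dir="outputs",            # illustrative path
    predict_with_generate=True,      # evaluation decodes with model.generate(...)
    generation_max_length=128,       # falls back to the model config when None
    generation_num_beams=4,
)
print(args.to_dict()["generation_num_beams"])  # 4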
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase__ : Optional[Any] = datasets.utils.logging.get_logger(__name__) lowercase__ : List[str] = ["names", "prefix"] lowercase__ : Optional[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] lowercase__ : Tuple = ["encoding_errors", "on_bad_lines"] lowercase__ : List[Any] = ["date_format"] @dataclass class UpperCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' lowerCAmelCase_ = ''',''' lowerCAmelCase_ = None lowerCAmelCase_ = '''infer''' lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = True lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = False lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = None lowerCAmelCase_ = '''.''' lowerCAmelCase_ = None lowerCAmelCase_ = '''"''' lowerCAmelCase_ = 0 lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = 0 lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = None lowerCAmelCase_ = 1_0000 lowerCAmelCase_ = None lowerCAmelCase_ = '''strict''' lowerCAmelCase_ = '''error''' lowerCAmelCase_ = None def snake_case__ ( self : Optional[int] ): """simple docstring""" if self.delimiter is not None: snake_case_ = self.delimiter if self.column_names is not None: snake_case_ = self.column_names @property def snake_case__ ( self : int ): """simple docstring""" snake_case_ = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new 
arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class UpperCAmelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowerCAmelCase_ = CsvConfig def snake_case__ ( self : Dict ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def snake_case__ ( self : Union[str, Any] , __lowercase : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) snake_case_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ , (str, list, tuple) ): snake_case_ = data_files if isinstance(A_ , A_ ): snake_case_ = [files] snake_case_ = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ = [] for split_name, files in data_files.items(): if isinstance(A_ , A_ ): snake_case_ = [files] snake_case_ = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ , gen_kwargs={"files": files} ) ) return splits def snake_case__ ( self : List[Any] , __lowercase : pa.Table ): """simple docstring""" if self.config.features is not None: snake_case_ = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ = table_cast(A_ , A_ ) return pa_table def snake_case__ ( self : int , __lowercase : Optional[Any] ): """simple docstring""" snake_case_ = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): snake_case_ = pd.read_csv(A_ , iterator=A_ , dtype=A_ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): snake_case_ = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(f"Failed to read file \'{file}\' with error {type(A_ )}: {e}" ) raise
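A short, hedged usage sketch for the CSV builder above: loading a local file (the path is hypothetical) through datasets.load_dataset, whose extra keyword arguments are collected by CsvConfig and forwarded to pandas.read_csv.

# Hedged usage sketch (not part of the builder): load a CSV through the
# datasets library; extra kwargs such as sep and skiprows flow into
# pandas.read_csv via CsvConfig.pd_read_csv_kwargs.
import datasets

ds = datasets.load_dataset(
    "csv",
    data_files={"train": "train.csv"},  # hypothetical file path
    sep=",",
    skiprows=0,
)
print(ds["train"].num_rows)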
187
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _A ( _lowercase ) -> Dict: """simple docstring""" if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ): return False return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule ) def _A ( _lowercase , _lowercase = True ) -> Optional[int]: """simple docstring""" __UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __UpperCamelCase = is_compiled_module(_lowercase ) if is_compiled: __UpperCamelCase = model __UpperCamelCase = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_lowercase , _lowercase ): __UpperCamelCase = model.module if not keep_fpaa_wrapper: __UpperCamelCase = getattr(_lowercase , 'forward' ) __UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase ) if original_forward is not None: while hasattr(_lowercase , '__wrapped__' ): __UpperCamelCase = forward.__wrapped__ if forward == original_forward: break __UpperCamelCase = forward if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ): convert_model(_lowercase , to_transformer_engine=_lowercase ) if is_compiled: __UpperCamelCase = model __UpperCamelCase = compiled_model return model def _A ( ) -> Any: """simple docstring""" PartialState().wait_for_everyone() def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(_lowercase , _lowercase ) elif PartialState().local_process_index == 0: torch.save(_lowercase , _lowercase ) @contextmanager def _A ( **_lowercase ) -> Union[str, Any]: """simple docstring""" for key, value in kwargs.items(): __UpperCamelCase = str(_lowercase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _A ( _lowercase ) -> Tuple: """simple docstring""" if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ): __UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase ) if hasattr(_lowercase , '__qualname__' ): return obj.__qualname__ if hasattr(_lowercase , '__name__' ): return obj.__name__ return str(_lowercase ) def _A ( _lowercase , _lowercase ) -> Any: """simple docstring""" for key, value in source.items(): if isinstance(_lowercase , _lowercase ): __UpperCamelCase = destination.setdefault(_lowercase , {} ) merge_dicts(_lowercase , _lowercase ) else: __UpperCamelCase = value return destination def _A ( _lowercase = None ) -> bool: """simple docstring""" if port is None: __UpperCamelCase = 2_95_00 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
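The recursive dictionary merge defined above (its names are obfuscated here) is easiest to see on a toy input; a standalone re-implementation of the same logic:

# Standalone sketch of the recursive dict merge above: nested dicts are merged
# key by key, scalar values from `source` overwrite those in `destination`.
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

print(merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3}))
# -> {'a': {'c': 2, 'b': 1}, 'd': 3}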
310
0
from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL UpperCAmelCase__ : List[str] = logging.get_logger(__name__) def lowerCamelCase__ ( a ) -> List[List[ImageInput]]: if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowercase ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class UpperCAmelCase ( _a ): '''simple docstring''' __UpperCamelCase : Dict = ['''pixel_values'''] def __init__( self : Optional[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Union[str, Any] , ): """simple docstring""" super().__init__(**A_ ) _A: Optional[Any] = size if size is not None else {'''shortest_edge''': 2_5_6} _A: Tuple = get_size_dict(A_ , default_to_square=A_ ) _A: Any = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _A: Union[str, Any] = get_size_dict(A_ , param_name='''crop_size''' ) _A: Dict = do_resize _A: Tuple = size _A: Any = do_center_crop _A: Optional[int] = crop_size _A: Optional[int] = resample _A: Any = do_rescale _A: List[Any] = rescale_factor _A: str = offset _A: Tuple = do_normalize _A: Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _A: Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def __magic_name__ ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ): """simple docstring""" _A: str = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" in size: _A: Union[str, Any] = get_resize_output_image_size(A_ , size['''shortest_edge'''] , default_to_square=A_ ) elif "height" in size and "width" in size: _A: Tuple = (size['''height'''], size['''width''']) else: raise ValueError(F"""Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}""" ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def __magic_name__ ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ): """simple docstring""" _A: List[Any] = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have \'height\' and \'width\' as keys. Got {size.keys()}""" ) return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ ) def __magic_name__ ( self : List[str] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" _A: Optional[Any] = image.astype(np.floataa ) if offset: _A: Tuple = image - (scale / 2) return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def __magic_name__ ( self : List[str] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ): """simple docstring""" return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): """simple docstring""" if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
_A: str = to_numpy_array(A_ ) if do_resize: _A: int = self.resize(image=A_ , size=A_ , resample=A_ ) if do_center_crop: _A: Optional[int] = self.center_crop(A_ , size=A_ ) if do_rescale: _A: Any = self.rescale(image=A_ , scale=A_ , offset=A_ ) if do_normalize: _A: str = self.normalize(image=A_ , mean=A_ , std=A_ ) _A: Any = to_channel_dimension_format(A_ , A_ ) return image def __magic_name__ ( self : List[str] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : List[Any] , ): """simple docstring""" _A: Union[str, Any] = do_resize if do_resize is not None else self.do_resize _A: Union[str, Any] = resample if resample is not None else self.resample _A: Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop _A: Dict = do_rescale if do_rescale is not None else self.do_rescale _A: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _A: str = offset if offset is not None else self.offset _A: Any = do_normalize if do_normalize is not None else self.do_normalize _A: Union[str, Any] = image_mean if image_mean is not None else self.image_mean _A: Optional[int] = image_std if image_std is not None else self.image_std _A: Any = size if size is not None else self.size _A: Tuple = get_size_dict(A_ , default_to_square=A_ ) _A: Optional[int] = crop_size if crop_size is not None else self.crop_size _A: Union[str, Any] = get_size_dict(A_ , param_name='''crop_size''' ) if not valid_images(A_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) _A: List[Any] = make_batched(A_ ) _A: Optional[int] = [ [ self._preprocess_image( image=A_ , do_resize=A_ , size=A_ , resample=A_ , do_center_crop=A_ , crop_size=A_ , do_rescale=A_ , rescale_factor=A_ , offset=A_ , do_normalize=A_ , image_mean=A_ , image_std=A_ , data_format=A_ , ) for img in video ] for video in videos ] _A: Optional[int] = {'''pixel_values''': videos} return BatchFeature(data=A_ , tensor_type=A_ )
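A standalone sketch of the make_batched normalization at the top of this file, assuming numpy frames; the is_valid_image checks of the original are elided:

# A single frame, a single video, or a batch of videos all normalize to
# List[List[image]] (a batch of videos, each a list of frames).
import numpy as np

def make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos      # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]    # a single video -> batch of one
    return [[videos]]      # a single frame -> one-frame video

frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched_sketch(frame)) == 1
assert len(make_batched_sketch([frame, frame])[0]) == 2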
121
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __snake_case = logging.getLogger(__name__) def _A ( _lowercase , _lowercase ) -> Optional[int]: """simple docstring""" return (preds == labels).mean() @dataclass class __lowerCamelCase : _lowercase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _lowercase = field( default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _lowercase = field( default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __lowerCamelCase : _lowercase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) _lowercase = field(metadata={"""help""": """Should contain the data files for the task."""} ) _lowercase = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _lowercase = field( default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _A ( ) -> str: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _lowercase ) # Set seed set_seed(training_args.seed ) try: __UpperCamelCase = processors[data_args.task_name]() __UpperCamelCase = processor.get_labels() __UpperCamelCase = len(_lowercase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __UpperCamelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , ) # Get datasets __UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __UpperCamelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_lowercase ) -> Dict: __UpperCamelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_lowercase , p.label_ids )} # Data collator __UpperCamelCase = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __UpperCamelCase = Trainer( model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __UpperCamelCase = trainer.evaluate() __UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(_lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _lowercase , _lowercase ) writer.write('%s = %s\n' % (key, value) ) results.update(_lowercase ) return results def _A ( _lowercase ) -> List[Any]: """simple docstring""" main() if __name__ == "__main__": main()
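A tiny check of the simple_accuracy metric defined at the top of this script: it is just the mean of exact matches between predictions and labels.

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75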
310
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch __A = logging.get_logger(__name__) @dataclass class snake_case : def __init__( self : Tuple , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : str=6.0 , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Any]="fp4" , UpperCamelCase__ : Optional[Any]=False , **UpperCamelCase__ : Any , )-> Any: '''simple docstring''' __lowerCAmelCase: List[Any] = load_in_abit __lowerCAmelCase: int = load_in_abit __lowerCAmelCase: List[Any] = llm_inta_threshold __lowerCAmelCase: Optional[Any] = llm_inta_skip_modules __lowerCAmelCase: str = llm_inta_enable_fpaa_cpu_offload __lowerCAmelCase: int = llm_inta_has_fpaa_weight __lowerCAmelCase: str = bnb_abit_quant_type __lowerCAmelCase: int = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: __lowerCAmelCase: Optional[int] = torch.floataa elif isinstance(A_ , A_): __lowerCAmelCase: Tuple = getattr(A_ , A_) elif isinstance(A_ , torch.dtype): __lowerCAmelCase: int = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") self.post_init() def lowercase_ ( self : List[str])-> Union[str, Any]: '''simple docstring''' if not isinstance(self.llm_inta_threshold , A_): raise ValueError("llm_int8_threshold must be a float") if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , A_): raise ValueError("llm_int8_skip_modules must be a list of strings") if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , A_): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") if not isinstance(self.llm_inta_has_fpaa_weight , A_): raise ValueError("llm_int8_has_fp16_weight must be a boolean") if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") if not isinstance(self.bnb_abit_quant_type , A_): raise ValueError("bnb_4bit_quant_type must be a string") if not isinstance(self.bnb_abit_use_double_quant , A_): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.39.0"): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version") def lowercase_ ( self : List[str])-> int: '''simple docstring''' return self.load_in_abit or self.load_in_abit def lowercase_ ( self : Optional[int])-> List[str]: '''simple docstring''' if self.load_in_abit: return "llm_int8" elif self.load_in_abit and 
self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def lowercase_ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any])-> List[str]: '''simple docstring''' __lowerCAmelCase: Any = cls(**A_) __lowerCAmelCase: List[Any] = [] for key, value in kwargs.items(): if hasattr(A_ , A_): setattr(A_ , A_ , A_) to_remove.append(A_) for key in to_remove: kwargs.pop(A_ , A_) if return_unused_kwargs: return config, kwargs else: return config def lowercase_ ( self : Any , UpperCamelCase__ : Union[str, os.PathLike])-> Optional[int]: '''simple docstring''' with open(A_ , "w" , encoding="utf-8") as writer: __lowerCAmelCase: int = self.to_dict() __lowerCAmelCase: Union[str, Any] = json.dumps(A_ , indent=2 , sort_keys=A_) + "\n" writer.write(A_) def lowercase_ ( self : Optional[Any])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: List[Any] = copy.deepcopy(self.__dict__) __lowerCAmelCase: List[str] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] return output def __repr__( self : str)-> Dict: '''simple docstring''' return f"{self.__class__.__name__} {self.to_json_string()}" def lowercase_ ( self : int , UpperCamelCase__ : bool = True)-> Dict: '''simple docstring''' if use_diff is True: __lowerCAmelCase: Optional[int] = self.to_diff_dict() else: __lowerCAmelCase: int = self.to_dict() return json.dumps(A_ , indent=2 , sort_keys=A_) + "\n" def lowercase_ ( self : Any)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Any = self.to_dict() # get the default config dict __lowerCAmelCase: int = BitsAndBytesConfig().to_dict() __lowerCAmelCase: Optional[int] = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: __lowerCAmelCase: Any = value return serializable_config_dict
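A hedged usage sketch with the public transformers API that this config backs; actual 4-bit model loading additionally requires bitsandbytes >= 0.39.0, as the validation above enforces.

import torch
from transformers import BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",           # selects the "nf4" quantization branch
    bnb_4bit_compute_dtype=torch.float16,
)
print(quant_config.to_dict())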
217
import os


def solution() -> int:
    """Project Euler 22: sum of (alphabetical rank) * (alphabetical name score)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        raw = file.readlines()[0]
    names = raw.replace('"', "").split(",")
    names.sort()

    total_score = 0
    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64  # A -> 1, B -> 2, ...
        total_score += (i + 1) * name_score
    return total_score


if __name__ == "__main__":
    print(solution())
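A worked check taken straight from the problem statement: COLIN scores 53 and, as the 938th name in the sorted list, contributes 938 * 53 = 49714.

# COLIN = 3 + 15 + 12 + 9 + 14 = 53; rank 938 gives 938 * 53 = 49714.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714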
310
0
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger a : Dict = get_logger(__name__) a : Optional[Any] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n" class UpperCamelCase__ : """simple docstring""" @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case ): '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class UpperCamelCase__ : """simple docstring""" @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case ): '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" @add_start_docstrings(snake_case ) def __call__( self , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' for processor in self: UpperCAmelCase : str = inspect.signature(processor.__call__ ).parameters if len(snake_case ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f"Make sure that all the required parameters: {list(function_args.keys() )} for " f"{processor.__class__} are passed to the logits processor." 
) UpperCAmelCase : Tuple = processor(snake_case , snake_case , snake_case , **snake_case ) else: UpperCAmelCase : Optional[Any] = processor(snake_case , snake_case , snake_case ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' if not isinstance(snake_case , snake_case ) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" ) UpperCAmelCase : Any = temperature def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : int = scores / self.temperature return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = -float("Inf" ) , snake_case = 1 ): '''simple docstring''' if not isinstance(snake_case , snake_case ) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" ) if not isinstance(snake_case , snake_case ) or (min_tokens_to_keep < 1): raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" ) UpperCAmelCase : Optional[int] = top_p UpperCAmelCase : Optional[int] = filter_value UpperCAmelCase : Tuple = min_tokens_to_keep def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = lax.top_k(snake_case , scores.shape[-1] ) UpperCAmelCase : Union[str, Any] = jnp.full_like(snake_case , self.filter_value ) UpperCAmelCase : Optional[int] = jax.nn.softmax(snake_case , axis=-1 ).cumsum(axis=-1 ) UpperCAmelCase : Tuple = cumulative_probs < self.top_p # include the token that is higher than top_p as well UpperCAmelCase : Optional[Any] = jnp.roll(snake_case , 1 ) score_mask |= score_mask.at[:, 0].set(snake_case ) # min tokens to keep UpperCAmelCase : Any = score_mask.at[:, : self.min_tokens_to_keep].set(snake_case ) UpperCAmelCase : Union[str, Any] = jnp.where(snake_case , snake_case , snake_case ) UpperCAmelCase : int = jax.lax.sort_key_val(snake_case , snake_case )[-1] return next_scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = -float("Inf" ) , snake_case = 1 ): '''simple docstring''' if not isinstance(snake_case , snake_case ) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" ) UpperCAmelCase : List[Any] = max(snake_case , snake_case ) UpperCAmelCase : Tuple = filter_value def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = scores.shape UpperCAmelCase : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value ) UpperCAmelCase : List[Any] = min(self.top_k , scores.shape[-1] ) # Safety check UpperCAmelCase , UpperCAmelCase : Union[str, Any] = lax.top_k(snake_case , snake_case ) UpperCAmelCase : Union[str, Any] = jnp.broadcast_to((jnp.arange(snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() UpperCAmelCase : Tuple = topk_scores.flatten() UpperCAmelCase : List[Any] = topk_indices.flatten() + shift UpperCAmelCase : List[str] = next_scores_flat.at[topk_indices_flat].set(snake_case ) UpperCAmelCase : int = next_scores_flat.reshape(snake_case , snake_case ) return next_scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = bos_token_id def __call__( self , snake_case , 
snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase : List[Any] = 1 - jnp.bool_(cur_len - 1 ) UpperCAmelCase : Dict = jnp.where(snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = max_length UpperCAmelCase : List[str] = eos_token_id def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 ) UpperCAmelCase : str = jnp.where(snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' if not isinstance(snake_case , snake_case ) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" ) if not isinstance(snake_case , snake_case ) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" ) UpperCAmelCase : Optional[Any] = min_length UpperCAmelCase : List[str] = eos_token_id def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) UpperCAmelCase : Tuple = jnp.where(snake_case , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , snake_case ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = list(snake_case ) UpperCAmelCase : List[str] = begin_index def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index ) UpperCAmelCase : str = jnp.where(snake_case , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , snake_case ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = list(snake_case ) def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : str = dict(snake_case ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
UpperCAmelCase : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: UpperCAmelCase : str = force_token_array.at[index].set(snake_case ) UpperCAmelCase : List[Any] = jnp.intaa(snake_case ) def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' def _force_token(snake_case ): UpperCAmelCase : Optional[int] = scores.shape[0] UpperCAmelCase : Optional[int] = self.force_token_array[generation_idx] UpperCAmelCase : Tuple = jnp.ones_like(snake_case , dtype=scores.dtype ) * -float("inf" ) UpperCAmelCase : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) UpperCAmelCase : Optional[int] = lax.dynamic_update_slice(snake_case , snake_case , (0, current_token) ) return new_scores UpperCAmelCase : Union[str, Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case ) , lambda: scores , ) , ) return scores class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Any = generate_config.eos_token_id UpperCAmelCase : int = generate_config.no_timestamps_token_id UpperCAmelCase : List[str] = generate_config.no_timestamps_token_id + 1 UpperCAmelCase : str = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(snake_case , "max_initial_timestamp_index" ): UpperCAmelCase : str = generate_config.max_initial_timestamp_index else: UpperCAmelCase : List[Any] = model_config.vocab_size if self.max_initial_timestamp_index is None: UpperCAmelCase : Optional[Any] = model_config.vocab_size def __call__( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(snake_case , snake_case ): UpperCAmelCase : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , snake_case , snake_case ) UpperCAmelCase : Dict = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case , ) UpperCAmelCase : Any = jnp.where((cur_len - self.begin_index) < 2 , snake_case , snake_case ) UpperCAmelCase : Union[str, Any] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case , snake_case , ) return jnp.where( snake_case , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , snake_case , ) UpperCAmelCase : Dict = jax.vmap(snake_case )(snake_case , snake_case ) UpperCAmelCase : Optional[int] = jnp.where(cur_len == self.begin_index , snake_case , snake_case ) UpperCAmelCase : Optional[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case , ) UpperCAmelCase : Optional[int] = self.timestamp_begin + self.max_initial_timestamp_index UpperCAmelCase : Tuple = jnp.where( snake_case , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , snake_case , ) # if sum of probability over timestamps is above any other token, sample timestamp UpperCAmelCase : List[Any] = jax.nn.log_softmax(snake_case , axis=-1 ) def handle_cumulative_probs(snake_case , snake_case ): UpperCAmelCase : List[str] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) UpperCAmelCase : Union[str, Any] = 
jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , snake_case , ) UpperCAmelCase : Union[str, Any] = jax.vmap(snake_case )(snake_case , snake_case ) return scores
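A standalone sketch of the temperature warper's arithmetic on dummy scores (the class names in this file are obfuscated, so the behavior is re-stated directly):

import jax.numpy as jnp

scores = jnp.array([[1.0, 2.0, 4.0]])
temperature = 2.0
warped = scores / temperature  # exactly what the warper's __call__ computes
print(warped)  # [[0.5 1. 2.]], a flatter distribution after softmax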
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def 
lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
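When num_return_sequences > 1, the flat list of decoded candidates is regrouped per input example via the imported chunks helper; a standalone re-implementation of the intended grouping:

def chunks(lst, n):
    # Split lst into consecutive groups of size n.
    return [lst[i : i + n] for i in range(0, len(lst), n)]

decoded = ["a1", "a2", "b1", "b2"]  # 2 inputs x 2 return sequences
print(chunks(decoded, 2))           # [['a1', 'a2'], ['b1', 'b2']]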
311
1
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # Distances of all training points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choose the k points with the smallest distances
    votes = [label for _, label in sorted(distances)[:k]]
    # The most commonly occurring class among them is the prediction
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
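A one-line sanity check for the distance helper above, using the 3-4-5 right triangle:

assert euclidean_distance([0, 0], [3, 4]) == 5.0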
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
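A sketch of the key renaming above, re-applying the substitutions that fire for one fairseq-style parameter name:

name = "transformer.layers.0.cross_attention.in_proj_weight"
name = name.replace("transformer", "model.decoder")
name = name.replace("cross_attention", "encoder_attn")
print(name)  # model.decoder.layers.0.encoder_attn.in_proj_weight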
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution grid of the same size to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # Check for already-visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark as visited.
            solutions[i][j] = 1
            # Check all four directions.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
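A quick usage sketch for the backtracking solver above (illustrative only, not part of the original file; the grid encoding, 0 = open and 1 = wall, follows the `not maze[i][j]` block check in run_maze):

# Hypothetical demo grid with one open path from (0, 0) to (3, 3).
demo_maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solve_maze(demo_maze)  # prints the 0/1 path matrix row by row and returns True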
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
'''simple docstring''' import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "M-CLIP" def __init__( self , snake_case=1_0_2_4 , snake_case=7_6_8 , **snake_case ): '''simple docstring''' UpperCAmelCase : Any = transformerDimSize UpperCAmelCase : Optional[Any] = imageDimSize super().__init__(**snake_case ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = MCLIPConfig def __init__( self , snake_case , *snake_case , **snake_case ): '''simple docstring''' super().__init__(snake_case , *snake_case , **snake_case ) UpperCAmelCase : List[str] = XLMRobertaModel(snake_case ) UpperCAmelCase : Optional[int] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = self.transformer(input_ids=snake_case , attention_mask=snake_case )[0] UpperCAmelCase : Tuple = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(snake_case ), embs
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 
2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 
3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = (CMStochasticIterativeScheduler,) SCREAMING_SNAKE_CASE__ : Optional[Any] = 10 def A_ ( self , **snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = { "num_train_timesteps": 2_0_1, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**snake_case ) return config def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = 1_0 UpperCAmelCase : Tuple = self.get_scheduler_config() UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0](**snake_case ) scheduler.set_timesteps(snake_case ) UpperCAmelCase : Union[str, Any] = scheduler.timesteps[0] UpperCAmelCase : Union[str, Any] = scheduler.timesteps[1] UpperCAmelCase : Optional[int] = self.dummy_sample UpperCAmelCase : str = 0.1 * sample UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample UpperCAmelCase : Any = scheduler.step(snake_case , snake_case , snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A_ ( self ): '''simple docstring''' for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=snake_case ) def A_ ( self ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.scheduler_classes[0] UpperCAmelCase : Dict = self.get_scheduler_config() UpperCAmelCase : List[str] = scheduler_class(**snake_case ) UpperCAmelCase : str = 1 scheduler.set_timesteps(snake_case ) UpperCAmelCase : Optional[int] = scheduler.timesteps UpperCAmelCase : Any = torch.manual_seed(0 ) UpperCAmelCase : Union[str, Any] = self.dummy_model() UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(snake_case ): # 1. scale model input UpperCAmelCase : Dict = scheduler.scale_model_input(snake_case , snake_case ) # 2. predict noise residual UpperCAmelCase : Tuple = model(snake_case , snake_case ) # 3. predict previous sample x_t-1 UpperCAmelCase : Any = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample UpperCAmelCase : int = pred_prev_sample UpperCAmelCase : Tuple = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 192.7614 ) < 1e-2 assert abs(result_mean.item() - 0.2510 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.scheduler_classes[0] UpperCAmelCase : Tuple = self.get_scheduler_config() UpperCAmelCase : Optional[int] = scheduler_class(**snake_case ) UpperCAmelCase : Dict = [1_0_6, 0] scheduler.set_timesteps(timesteps=snake_case ) UpperCAmelCase : List[Any] = scheduler.timesteps UpperCAmelCase : Tuple = torch.manual_seed(0 ) UpperCAmelCase : Any = self.dummy_model() UpperCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input UpperCAmelCase : List[str] = scheduler.scale_model_input(snake_case , snake_case ) # 2. predict noise residual UpperCAmelCase : str = model(snake_case , snake_case ) # 3. 
predict previous sample x_t-1 UpperCAmelCase : Union[str, Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample UpperCAmelCase : Any = pred_prev_sample UpperCAmelCase : List[str] = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase : int = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 347.6357 ) < 1e-2 assert abs(result_mean.item() - 0.4527 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase : int = self.get_scheduler_config() UpperCAmelCase : List[Any] = scheduler_class(**snake_case ) UpperCAmelCase : str = [3_9, 3_0, 1_2, 1_5, 0] with self.assertRaises(snake_case , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.scheduler_classes[0] UpperCAmelCase : Dict = self.get_scheduler_config() UpperCAmelCase : str = scheduler_class(**snake_case ) UpperCAmelCase : str = [3_9, 3_0, 1_2, 1, 0] UpperCAmelCase : List[str] = len(snake_case ) with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.scheduler_classes[0] UpperCAmelCase : Dict = self.get_scheduler_config() UpperCAmelCase : Optional[int] = scheduler_class(**snake_case ) UpperCAmelCase : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=snake_case )
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
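A few worked values for the function above (sanity checks added here, traced by hand against the bin()-slicing logic; the bit widths are the minimal ones the algorithm produces):

assert twos_complement(-5) == "0b1011"  # -5 in 4-bit two's complement
assert twos_complement(-1) == "0b11"    # -1 in 2-bit two's complement
assert twos_complement(0) == "0b0"      # zero falls through to the "0" branch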
import math


def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Build the octal digits in base 10, one power of ten per digit.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes the trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
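For comparison, the same conversion via Python's built-in (a sketch added here; like the loop above it assumes a non-negative input, and both return "0o0" for zero):

def decimal_to_octal_builtin(num: int) -> str:
    # oct() produces the same "0o..." prefix, e.g. oct(65) == "0o101".
    return oct(num)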
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # The most commonly occurring class among them
    # is the class into which the point is classified.
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
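A hypothetical cross-check of the hand-rolled classifier against scikit-learn's built-in estimator (same k=5 default; both should normally label this sample setosa, though the random train/test split can vary):

from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])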
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = np.inf def set_batch_size(__magic_name__ ) -> None: nonlocal batch_size if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : Optional[Any] = min(__magic_name__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : Any = min(__magic_name__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__magic_name__ , __magic_name__ ) and feature.dtype == "binary": UpperCAmelCase : Optional[int] = min(__magic_name__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__magic_name__ , __magic_name__ ) return None if batch_size is np.inf else batch_size class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ): '''simple docstring''' super().__init__( snake_case , split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , num_proc=snake_case , **snake_case , ) UpperCAmelCase : Optional[Any] = path_or_paths if isinstance(snake_case , snake_case ) else {self.split: path_or_paths} UpperCAmelCase : str = _PACKAGED_DATASETS_MODULES["parquet"][1] UpperCAmelCase : List[Any] = Parquet( cache_dir=snake_case , data_files=snake_case , features=snake_case , hash=snake_case , **snake_case , ) def A_ ( self ): '''simple docstring''' if self.streaming: UpperCAmelCase : Optional[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: UpperCAmelCase : int = None UpperCAmelCase : str = None UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[int] = None self.builder.download_and_prepare( download_config=snake_case , download_mode=snake_case , verification_mode=snake_case , base_path=snake_case , num_proc=self.num_proc , ) UpperCAmelCase : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=snake_case , in_memory=self.keep_in_memory ) return dataset class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = dataset UpperCAmelCase : Optional[int] = path_or_buf UpperCAmelCase : int = batch_size or get_writer_batch_size(dataset.features ) UpperCAmelCase : Dict = parquet_writer_kwargs def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: UpperCAmelCase : List[str] = self._write(file_obj=snake_case , batch_size=snake_case , **self.parquet_writer_kwargs ) else: UpperCAmelCase : Tuple = self._write(file_obj=self.path_or_buf , batch_size=snake_case , **self.parquet_writer_kwargs ) return written def A_ 
( self , snake_case , snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : int = 0 UpperCAmelCase : str = parquet_writer_kwargs.pop("path_or_buf" , snake_case ) UpperCAmelCase : Optional[Any] = self.dataset.features.arrow_schema UpperCAmelCase : Any = pq.ParquetWriter(snake_case , schema=snake_case , **snake_case ) for offset in logging.tqdm( range(0 , len(self.dataset ) , snake_case ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): UpperCAmelCase : List[Any] = query_table( table=self.dataset._data , key=slice(snake_case , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(snake_case ) written += batch.nbytes writer.close() return written
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
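Why the bit trick works, as an illustrative trace (note the edge case: it also reports 0 as True, since 0 & -1 == 0):

#   8     = 0b1000   a power of two has exactly one set bit
#   8 - 1 = 0b0111   subtracting 1 flips that bit and sets all lower bits
#   8 & 7 = 0b0000   so the AND is zero exactly for powers of two (and 0)
for n in (0, 1, 2, 3, 8, 12, 64):
    print(n, is_power_of_two(n))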
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
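What the lazy module buys, as a usage sketch (hypothetical; assumes the standard transformers top-level export): the tokenizer module is only imported when the attribute is first resolved.

from transformers import ByT5Tokenizer  # the real import happens here, on first access

tokenizer = ByT5Tokenizer()  # byte-level, so no vocab file is needed
print(tokenizer("hello").input_ids)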
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
'''simple docstring''' import datasets a : Union[str, Any] = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n" a : Tuple = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n" a : List[str] = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n" def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' return {"accuracy": simple_accuracy(snake_case , snake_case )}
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Replace possible -100 values in labels by `pad_token_id`.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
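A tiny trace of shift_tokens_right, with illustrative values (-100 is the usual ignore-index in labels, which the jnp.where step maps back to the pad id):

ids = jnp.array([[5, 6, 7], [8, -100, 9]])
print(shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=2))
# [[2 5 6]
#  [2 8 0]]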
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : int = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + 
measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job.
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
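A hardening note, not in the original: bare requests.get calls can hang or get blocked, so a real scraper would set a timeout and a User-Agent (the values below are placeholders):

response = requests.get(
    url + "mumbai",
    timeout=10,
    headers={"User-Agent": "Mozilla/5.0"},
)
soup = BeautifulSoup(response.content, "html.parser")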
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
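Quick checks against the smaller cases quoted in the Project Euler 47 statement (14, 15 and 644, 645, 646), assuming the restored names above:

assert solution(2) == 14   # 14 = 2 * 7 and 15 = 3 * 5
assert solution(3) == 644  # 644, 645 and 646 each have three distinct prime factors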
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) a : Tuple = "bert-base-cased" a : List[Any] = "fp16" a : Any = "bf16" a : Optional[int] = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Optional[int] = dict( ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , ) def A_ ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case ): UpperCAmelCase : List[Any] = self.dist_env.copy() UpperCAmelCase : Any = f"{i + 1}" UpperCAmelCase : List[str] = strategy with mockenv_context(**snake_case ): UpperCAmelCase : Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def A_ ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case ): UpperCAmelCase : Dict = self.dist_env.copy() UpperCAmelCase : Tuple = prefetch_policy with mockenv_context(**snake_case ): UpperCAmelCase : int = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def A_ ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case ): UpperCAmelCase : Optional[int] = self.dist_env.copy() UpperCAmelCase : Union[str, Any] = state_dict_type with mockenv_context(**snake_case ): UpperCAmelCase : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = AutoModel.from_pretrained(snake_case ) for policy in FSDP_AUTO_WRAP_POLICY: UpperCAmelCase : Union[str, Any] = self.dist_env.copy() UpperCAmelCase : Dict = policy if policy == "TRANSFORMER_BASED_WRAP": UpperCAmelCase : List[Any] = "BertLayer" elif policy == "SIZE_BASED_WRAP": UpperCAmelCase : int = "2000" with mockenv_context(**snake_case ): UpperCAmelCase : Tuple = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) UpperCAmelCase : List[Any] = self.dist_env.copy() UpperCAmelCase : Union[str, Any] = "TRANSFORMER_BASED_WRAP" UpperCAmelCase : Optional[Any] = "T5Layer" with 
mockenv_context(**snake_case ): UpperCAmelCase : str = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) ) UpperCAmelCase : Optional[int] = self.dist_env.copy() UpperCAmelCase : str = "SIZE_BASED_WRAP" UpperCAmelCase : List[str] = "0" with mockenv_context(**snake_case ): UpperCAmelCase : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def A_ ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: UpperCAmelCase : Tuple = self.dist_env.copy() UpperCAmelCase : Tuple = mp_dtype with mockenv_context(**snake_case ): UpperCAmelCase : Any = Accelerator() if mp_dtype == "fp16": UpperCAmelCase : int = torch.floataa elif mp_dtype == "bf16": UpperCAmelCase : Optional[Any] = torch.bfloataa UpperCAmelCase : Union[str, Any] = MixedPrecision(param_dtype=snake_case , reduce_dtype=snake_case , buffer_dtype=snake_case ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , snake_case ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case ) def A_ ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: UpperCAmelCase : Optional[Any] = self.dist_env.copy() UpperCAmelCase : Union[str, Any] = str(snake_case ).lower() with mockenv_context(**snake_case ): UpperCAmelCase : str = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = 0.82 UpperCAmelCase : Dict = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] UpperCAmelCase : Union[str, Any] = { "multi_gpu_fp16": 3_2_0_0, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2_0_0_0, "fsdp_full_shard_transformer_based_wrap_fp16": 1_9_0_0, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } UpperCAmelCase : str = 1_6_0 UpperCAmelCase : List[Any] = 1_6_0 UpperCAmelCase : Optional[int] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = os.path.join(self.test_scripts_folder , "test_performance.py" ) UpperCAmelCase : Dict = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"] for config in self.performance_configs: UpperCAmelCase : Optional[Any] = cmd.copy() for i, strategy in enumerate(snake_case ): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "fp32" in config: cmd_config.append("--mixed_precision=no" ) else: cmd_config.append("--mixed_precision=fp16" ) if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = os.path.join(self.test_scripts_folder , "test_checkpointing.py" ) UpperCAmelCase : Optional[int] = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp", "--mixed_precision=fp16", "--fsdp_transformer_layer_cls_to_wrap=BertLayer", ] for i, strategy in enumerate(snake_case ): UpperCAmelCase : Tuple = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) if strategy != "FULL_SHARD": continue UpperCAmelCase : Union[str, Any] = len(snake_case ) for state_dict_type in FSDP_STATE_DICT_TYPE: UpperCAmelCase : Optional[Any] = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", "--partial_train_epoch=1", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) UpperCAmelCase : Union[str, Any] = cmd_config[:-1] UpperCAmelCase : Optional[int] = os.path.join(self.tmpdir , "epoch_0" ) cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" ) UpperCAmelCase : Union[str, Any] = [ "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): UpperCAmelCase : Union[str, Any] = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"] ) else: cmd_config.extend(["--mixed_precision=no"] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"] ) for i, strategy in enumerate(snake_case ): if strategy.lower() in spec: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break 
if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest

import numpy as np


def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # Swapped argument order so the dimension checks above must raise.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
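A worked instance of the determinant identity the first test relies on (values added here and checked by hand): for the block matrix M = [[A, B], [B^T, C]] with invertible A, det(M) = det(A) * det(S), where S is the Schur complement of A.

a = np.array([[4.0, 0.0], [0.0, 4.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[3.0]])
s = schur_complement(a, b, c)  # C - B^T A^{-1} B = 3 - 5/4 = 1.75
m = np.block([[a, b], [b.T, c]])
print(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))  # True: 28 == 16 * 1.75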
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
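A self-contained sketch of the two naming helpers, mirroring the upstream datasets/naming.py. Note the second comprehension splits each component `n`; the flattened copy above appears to split the whole input instead, which would be a bug:

import itertools
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    parts = _single_underscore_re.split(name)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")


print(camelcase_to_snakecase("SquadV2"))   # squad_v2
print(snakecase_to_camelcase("squad_v2"))  # SquadV2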
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a : List[str] = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ["pixel_values"] def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 2_2_4} UpperCAmelCase : int = get_size_dict(snake_case , default_to_square=snake_case ) UpperCAmelCase : int = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} UpperCAmelCase : str = get_size_dict(snake_case , default_to_square=snake_case , param_name="crop_size" ) UpperCAmelCase : Any = do_resize UpperCAmelCase : List[Any] = size UpperCAmelCase : Optional[Any] = resample UpperCAmelCase : Union[str, Any] = do_center_crop UpperCAmelCase : Optional[int] = crop_size UpperCAmelCase : List[Any] = do_rescale UpperCAmelCase : Tuple = rescale_factor UpperCAmelCase : Tuple = do_normalize UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase : Any = do_convert_rgb def A_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : Any = get_size_dict(snake_case , default_to_square=snake_case ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case ) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : Tuple = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ): '''simple docstring''' UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : int = size if size is not None else self.size UpperCAmelCase : Tuple = get_size_dict(snake_case , param_name="size" , default_to_square=snake_case ) UpperCAmelCase : Any = resample if resample is not None else self.resample UpperCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase : int = crop_size if crop_size is not None else self.crop_size UpperCAmelCase : int = get_size_dict(snake_case , param_name="crop_size" , default_to_square=snake_case ) UpperCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std UpperCAmelCase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase : Optional[Any] = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase : Tuple = [convert_to_rgb(snake_case ) for image in images] # All transformations expect numpy arrays. 
UpperCAmelCase : Dict = [to_numpy_array(snake_case ) for image in images] if do_resize: UpperCAmelCase : str = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images] if do_center_crop: UpperCAmelCase : List[Any] = [self.center_crop(image=snake_case , size=snake_case ) for image in images] if do_rescale: UpperCAmelCase : List[Any] = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: UpperCAmelCase : int = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] UpperCAmelCase : Any = [to_channel_dimension_format(snake_case , snake_case ) for image in images] UpperCAmelCase : Union[str, Any] = {"pixel_values": images} return BatchFeature(data=snake_case , tensor_type=snake_case )
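Stripped of the HuggingFace plumbing, the default pipeline above is: resize the shortest edge to 224, center-crop 224x224, rescale by 1/255, normalize, then go channels-first. A minimal numpy/PIL sketch; the mean/std values are the standard OpenAI CLIP constants, assumed here:

import numpy as np
from PIL import Image

CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])  # assumed OPENAI_CLIP_MEAN
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])  # assumed OPENAI_CLIP_STD


def preprocess(image):
    w, h = image.size
    scale = 224 / min(w, h)                       # resize shortest edge to 224
    image = image.resize((round(w * scale), round(h * scale)), Image.Resampling.BICUBIC)
    w, h = image.size
    left, top = (w - 224) // 2, (h - 224) // 2    # center crop 224x224
    image = image.crop((left, top, left + 224, top + 224))
    pixels = np.asarray(image.convert("RGB")) / 255.0  # rescale
    pixels = (pixels - CLIP_MEAN) / CLIP_STD           # normalize
    return pixels.transpose(2, 0, 1)                   # HWC -> CHW (channels first)


print(preprocess(Image.new("RGB", (640, 480))).shape)  # (3, 224, 224)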
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
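Typical use of the generated module is standard protobuf parsing: load a trained SentencePiece model file and inspect it. The field names below follow the serialized descriptor above (trainer_spec, pieces, vocab_size); the import path and file path are placeholders for wherever this module lives in your tree:

import sentencepiece_model_pb2 as sp_pb2  # adjust to the module's actual location

m = sp_pb2.ModelProto()
with open("spiece.model", "rb") as f:      # placeholder path to a trained model
    m.ParseFromString(f.read())

print(m.trainer_spec.model_type)           # e.g. 1 == UNIGRAM per the enum above
print(m.trainer_spec.vocab_size)           # defaults to 8000 per the spec
print(m.pieces[0].piece, m.pieces[0].score)  # first vocabulary entry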
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a : Optional[Any] = 16 a : Dict = 32 def lowercase ( __magic_name__ , __magic_name__ = 16 , __magic_name__ = "bert-base-cased" ): '''simple docstring''' UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(__magic_name__ ) UpperCAmelCase : List[str] = load_dataset("glue" , "mrpc" ) def tokenize_function(__magic_name__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase : Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__magic_name__ , max_length=__magic_name__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase : Optional[Any] = datasets.map( __magic_name__ , batched=__magic_name__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__magic_name__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase : str = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__magic_name__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__magic_name__ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(__magic_name__ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
UpperCAmelCase : List[Any] = DataLoader( tokenized_datasets["train"] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ ) UpperCAmelCase : str = DataLoader( tokenized_datasets["validation"] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ ) return train_dataloader, eval_dataloader def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase : str = config["lr"] UpperCAmelCase : List[str] = int(config["num_epochs"] ) UpperCAmelCase : Tuple = int(config["seed"] ) UpperCAmelCase : Dict = int(config["batch_size"] ) UpperCAmelCase : Optional[Any] = args.model_name_or_path set_seed(__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Tuple = get_dataloaders(__magic_name__ , __magic_name__ , __magic_name__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__magic_name__ , return_dict=__magic_name__ ) # Instantiate optimizer UpperCAmelCase : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase : Any = optimizer_cls(params=model.parameters() , lr=__magic_name__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: UpperCAmelCase : str = 1 UpperCAmelCase : str = (len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=__magic_name__ , num_warmup_steps=0 , num_training_steps=__magic_name__ , ) else: UpperCAmelCase : Optional[int] = DummyScheduler(__magic_name__ , total_num_steps=__magic_name__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = accelerator.prepare( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase : Optional[Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase : int = 0 # Now we train the model UpperCAmelCase : Tuple = evaluate.load("glue" , "mrpc" ) UpperCAmelCase : Optional[int] = 0 UpperCAmelCase : Any = {} for epoch in range(__magic_name__ , __magic_name__ ): model.train() for step, batch in enumerate(__magic_name__ ): UpperCAmelCase : List[Any] = model(**__magic_name__ ) UpperCAmelCase : List[Any] = outputs.loss UpperCAmelCase : int = loss / gradient_accumulation_steps accelerator.backward(__magic_name__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase : Tuple = 0 for step, batch in enumerate(__magic_name__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase : str = model(**__magic_name__ ) UpperCAmelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase , UpperCAmelCase : int = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__magic_name__ ) - 1: UpperCAmelCase : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__magic_name__ , references=__magic_name__ , ) UpperCAmelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , __magic_name__ ) UpperCAmelCase : Union[str, Any] = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: UpperCAmelCase : int = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(__magic_name__ , __magic_name__ ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=__magic_name__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__magic_name__ , ) parser.add_argument( "--output_dir" , type=__magic_name__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=__magic_name__ , default=__magic_name__ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=__magic_name__ , default=3 , help="Number of train epochs." , ) UpperCAmelCase : Union[str, Any] = parser.parse_args() UpperCAmelCase : List[str] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(__magic_name__ , __magic_name__ ) if __name__ == "__main__": main()
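The scheduler length above comes from `(len(train_dataloader) * num_epochs) // gradient_accumulation_steps`. A worked example of that arithmetic, with illustrative numbers:

num_batches_per_epoch = 230        # e.g. len(train_dataloader) for MRPC at batch size 16
num_epochs = 3
gradient_accumulation_steps = 2    # e.g. read from the DeepSpeed config

max_training_steps = (num_batches_per_epoch * num_epochs) // gradient_accumulation_steps
print(max_training_steps)          # 345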
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and 
[ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
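A hypothetical end-to-end run on a three-city instance, using the upstream function names (generate_neighbours, generate_first_solution, tabu_search) that the flattened copy above collapses. The input format is one edge per line, `<node> <node> <distance>`, and node names should be single characters because generate_first_solution reads the file's first character as the start node:

from pathlib import Path

# Tiny complete graph over cities a, b, c.
Path("tsp_data.txt").write_text("a b 20\na c 18\nb c 10\n")

neighbours = generate_neighbours("tsp_data.txt")
first_solution, distance = generate_first_solution("tsp_data.txt", neighbours)
best_solution, best_cost = tabu_search(first_solution, distance, neighbours, 5, 3)
print(best_solution, best_cost)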
'''simple docstring'''
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
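Example: the last partition absorbs any remainder, so the full byte range is always covered:

print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']
print(allocation_num(10, 3))   # ['1-3', '4-6', '7-10']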
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | 
not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
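A sanity check of the pure-Python digest against hashlib. The entry point is the last function above (md5_me upstream, before the names were flattened); per reformat_hex it returns the hex digest as bytes:

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")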
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def lowercase ( __magic_name__ ): '''simple docstring''' return x + 2 class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = "x = 3" UpperCAmelCase : Dict = {} UpperCAmelCase : List[Any] = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {"x": 3} ) UpperCAmelCase : List[Any] = "x = y" UpperCAmelCase : str = {"y": 5} UpperCAmelCase : List[Any] = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case , {"x": 5, "y": 5} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "y = add_two(x)" UpperCAmelCase : List[Any] = {"x": 3} UpperCAmelCase : Tuple = evaluate(snake_case , {"add_two": add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: UpperCAmelCase : Any = evaluate(snake_case , {} , state=snake_case ) assert result is None assert "tried to execute add_two" in out.out def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = "x = 3" UpperCAmelCase : List[str] = {} UpperCAmelCase : Tuple = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {"x": 3} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "test_dict = {'x': x, 'y': add_two(x)}" UpperCAmelCase : Tuple = {"x": 3} UpperCAmelCase : int = evaluate(snake_case , {"add_two": add_two} , state=snake_case ) self.assertDictEqual(snake_case , {"x": 3, "y": 5} ) self.assertDictEqual(snake_case , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = "x = 3\ny = 5" UpperCAmelCase : Optional[int] = {} UpperCAmelCase : str = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case , {"x": 3, "y": 5} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = "text = f'This is x: {x}.'" UpperCAmelCase : Optional[Any] = {"x": 3} UpperCAmelCase : Union[str, Any] = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(snake_case , {"x": 3, "text": "This is x: 3."} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "if x <= 3:\n y = 2\nelse:\n y = 5" UpperCAmelCase : Optional[Any] = {"x": 3} UpperCAmelCase : Any = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(snake_case , {"x": 3, "y": 2} ) UpperCAmelCase : Any = {"x": 8} UpperCAmelCase : Tuple = evaluate(snake_case , {} , state=snake_case ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(snake_case , {"x": 8, "y": 5} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = "test_list = [x, add_two(x)]" UpperCAmelCase : Tuple = {"x": 3} UpperCAmelCase : Optional[Any] = evaluate(snake_case , {"add_two": add_two} , state=snake_case ) self.assertListEqual(snake_case , [3, 5] ) self.assertDictEqual(snake_case , {"x": 3, "test_list": [3, 5]} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "y = x" UpperCAmelCase : Optional[Any] = {"x": 3} UpperCAmelCase : Tuple = evaluate(snake_case , {} , state=snake_case ) assert result == 3 self.assertDictEqual(snake_case , {"x": 3, "y": 3} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "test_list = [x, add_two(x)]\ntest_list[1]" UpperCAmelCase : List[Any] = {"x": 3} UpperCAmelCase : Any = evaluate(snake_case , {"add_two": add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {"x": 3, "test_list": [3, 5]} ) UpperCAmelCase : Any = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" UpperCAmelCase : int = {"x": 3} UpperCAmelCase : Optional[int] = evaluate(snake_case , {"add_two": add_two} , state=snake_case ) assert result == 5 self.assertDictEqual(snake_case , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = "x = 0\nfor i in range(3):\n x = i" UpperCAmelCase : Union[str, Any] = {} UpperCAmelCase : List[str] = evaluate(snake_case , {"range": range} , state=snake_case ) assert result == 2 self.assertDictEqual(snake_case , {"x": 2, "i": 2} )
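Outside the test harness, the interpreter under test is used the same way: run a snippet against an allow-list of tools and collect assignments in `state`:

from transformers.tools.python_interpreter import evaluate

state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, state=state)
print(result, state)  # 5 {'x': 3, 'y': 5}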
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
'''simple docstring'''
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
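The converter can also be invoked programmatically instead of via the CLI; all paths below are placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",      # placeholder
    albert_config_file="albert_base/albert_config.json",   # placeholder
    pytorch_dump_path="albert_base/pytorch_model.bin",     # placeholder
)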
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: 
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 
0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
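Once converted, detections can be decoded with the image processor's post-processing helper. A sketch continuing the script's own objects (the threshold is illustrative):

import torch

image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = image_processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=target_sizes
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())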
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean a : Tuple = 0 a : List[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right a : int = tuple[int, int] class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = pos_x UpperCAmelCase : int = pos_y UpperCAmelCase : Optional[int] = (pos_y, pos_x) UpperCAmelCase : int = goal_x UpperCAmelCase : Union[str, Any] = goal_y UpperCAmelCase : Union[str, Any] = g_cost UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : List[str] = self.calculate_heuristic() UpperCAmelCase : Union[str, Any] = self.g_cost + self.h_cost def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.pos_x - self.goal_x UpperCAmelCase : List[Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(snake_case ) + abs(snake_case ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , snake_case ): '''simple docstring''' return self.f_cost < other.f_cost class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case ) UpperCAmelCase : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case ) UpperCAmelCase : Any = [self.start] UpperCAmelCase : list[Node] = [] UpperCAmelCase : int = False def A_ ( self ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase : str = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(snake_case ) self.closed_nodes.append(snake_case ) UpperCAmelCase : Union[str, Any] = self.get_successors(snake_case ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(snake_case ) else: # retrieve the best current path UpperCAmelCase : Tuple = self.open_nodes.pop(self.open_nodes.index(snake_case ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(snake_case ) else: self.open_nodes.append(snake_case ) return [self.start.pos] def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : int = [] for action in delta: UpperCAmelCase : Dict = parent.pos_x + action[1] UpperCAmelCase : int = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( snake_case , snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case , ) ) return successors def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = node UpperCAmelCase : List[str] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase : Union[str, Any] = current_node.parent path.reverse() return path class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = AStar(snake_case , snake_case ) UpperCAmelCase : Union[str, Any] = AStar(snake_case , snake_case ) 
UpperCAmelCase : Union[str, Any] = False def A_ ( self ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase : Union[str, Any] = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase : str = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( snake_case , snake_case ) self.fwd_astar.closed_nodes.append(snake_case ) self.bwd_astar.closed_nodes.append(snake_case ) UpperCAmelCase : int = current_bwd_node UpperCAmelCase : List[str] = current_fwd_node UpperCAmelCase : Tuple = { self.fwd_astar: self.fwd_astar.get_successors(snake_case ), self.bwd_astar: self.bwd_astar.get_successors(snake_case ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(snake_case ) else: # retrieve the best current path UpperCAmelCase : Optional[int] = astar.open_nodes.pop( astar.open_nodes.index(snake_case ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(snake_case ) else: astar.open_nodes.append(snake_case ) return [self.fwd_astar.start.pos] def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.fwd_astar.retrace_path(snake_case ) UpperCAmelCase : int = self.bwd_astar.retrace_path(snake_case ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase : List[str] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] a : int = (0, 0) a : Tuple = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) a : Union[str, Any] = time.time() a : str = AStar(init, goal) a : List[Any] = a_star.search() a : str = time.time() - start_time print(F'AStar execution time = {end_time:f} seconds') a : str = time.time() a : List[Any] = BidirectionalAStar(init, goal) a : List[Any] = time.time() - bd_start_time print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
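For intuition, here are the two heuristics the HEURISTIC flag selects between, evaluated for a node at (y=0, x=0) with the goal at (y=6, x=6):

from math import sqrt

dx, dy = 0 - 6, 0 - 6
print(abs(dx) + abs(dy))    # Manhattan (HEURISTIC == 1): 12
print(sqrt(dx**2 + dy**2))  # Euclidean (HEURISTIC == 0): ~8.49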
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) a : Tuple = logging.getLogger(__name__) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Any = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." ) UpperCAmelCase : List[Any] = parser.parse_args() logger.info(F"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>` UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(F"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: UpperCAmelCase : str = fp.readlines() logger.info("Start encoding" ) logger.info(F"{len(__magic_name__ )} examples to process." ) UpperCAmelCase : int = [] UpperCAmelCase : int = 0 UpperCAmelCase : Union[str, Any] = 1_0000 UpperCAmelCase : Union[str, Any] = time.time() for text in data: UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}" UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) rslt.append(__magic_name__ ) iter += 1 if iter % interval == 0: UpperCAmelCase : Dict = time.time() logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCAmelCase : Any = time.time() logger.info("Finished binarization" ) logger.info(F"{len(__magic_name__ )} examples processed." ) UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCAmelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt] else: UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(F"Dump to {dp_file}" ) with open(__magic_name__ , "wb" ) as handle: pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
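The vocab-size check matters because token ids stored as uint16 take half the memory of int32 whenever the vocabulary fits in 16 bits:

import numpy as np

ids = list(range(30000))      # e.g. BERT's ~30k vocabulary
print(np.uint16(ids).nbytes)  # 60000 bytes
print(np.int32(ids).nbytes)   # 120000 bytes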
311
1
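# The binarization script above picks the narrowest integer dtype that can hold
# every token id: uint16 when the vocabulary fits in 16 bits, int32 otherwise,
# roughly halving the pickled dataset for BERT-scale vocabularies. A minimal
# self-contained sketch of that dtype selection (the vocab size and ids below
# are made-up illustration values, not taken from any real tokenizer):
import numpy as np

vocab_size = 30_522            # hypothetical, e.g. a BERT-sized vocabulary
token_ids = [101, 7_592, 102]  # hypothetical encoded sentence

# uint16 represents ids in [0, 65535]; use it only when every id is guaranteed to fit.
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
encoded = np.array(token_ids, dtype=dtype)
print(encoded.dtype, encoded.nbytes)  # uint16 6 -> half the bytes of int32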
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument a : Optional[Any] = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(s_dict.keys() ) for key in keys: UpperCAmelCase : Tuple = R".*/layers_(\d+)" UpperCAmelCase : str = key if re.match(__magic_name__ , __magic_name__ ): UpperCAmelCase : Optional[Any] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , __magic_name__ ) UpperCAmelCase : Tuple = R"(encoder|decoder)\/" if re.match(__magic_name__ , __magic_name__ ): UpperCAmelCase : Tuple = re.match(__magic_name__ , __magic_name__ ).groups() if groups[0] == "encoder": UpperCAmelCase : Optional[Any] = re.sub(R"/mlp/" , R"/1/mlp/" , __magic_name__ ) UpperCAmelCase : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , __magic_name__ ) elif groups[0] == "decoder": UpperCAmelCase : Tuple = re.sub(R"/mlp/" , R"/2/mlp/" , __magic_name__ ) UpperCAmelCase : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , __magic_name__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCAmelCase : int = new_key.replace(__magic_name__ , __magic_name__ ) print(F"{key} -> {new_key}" ) UpperCAmelCase : Union[str, Any] = s_dict.pop(__magic_name__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase : Optional[Any] = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCAmelCase : Optional[Any] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCAmelCase : Any = s_dict[key].shape[0] UpperCAmelCase : Union[str, Any] = s_dict[key] for idx in range(__magic_name__ ): s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx] print(F"{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}" ) s_dict.pop(__magic_name__ ) return s_dict a : Dict = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' import regex as re with open(__magic_name__ , "r" ) as f: UpperCAmelCase : Optional[Any] = f.read() UpperCAmelCase : List[Any] = re.findall(R"(.*) = ([0-9.]*)" , __magic_name__ ) UpperCAmelCase : Optional[int] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCAmelCase : Any = float(__magic_name__ ) if "." in value else int(__magic_name__ ) UpperCAmelCase : Dict = re.findall(R"(.*activations) = \(\'(.*)\',\)" , __magic_name__ )[0] UpperCAmelCase : Optional[Any] = str(activation[1] ) UpperCAmelCase : Dict = num_experts UpperCAmelCase : Optional[int] = SwitchTransformersConfig(**__magic_name__ ) return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="./" , __magic_name__=8 ): '''simple docstring''' print(F"Loading flax weights from : {flax_checkpoint_path}" ) UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(__magic_name__ ) if gin_file is not None: UpperCAmelCase : Any = convert_gin_to_config(__magic_name__ , __magic_name__ ) else: UpperCAmelCase : Tuple = SwitchTransformersConfig.from_pretrained(__magic_name__ ) UpperCAmelCase : Any = SwitchTransformersForConditionalGeneration(__magic_name__ ) UpperCAmelCase : str = flax_params["target"] UpperCAmelCase : str = flatten_dict(__magic_name__ , sep="/" ) UpperCAmelCase : str = rename_keys(__magic_name__ ) UpperCAmelCase : Optional[Any] = unflatten_dict(__magic_name__ , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(__magic_name__ , __magic_name__ ) print(F"Save PyTorch model to {pytorch_dump_path}" ) pt_model.save_pretrained(__magic_name__ ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") a : int = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
311
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) 
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
311
1
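# The Switch Transformers converter above rewrites flat T5X parameter paths
# (".../layers_3/...") into the nested "block/<n>/layer" layout the PyTorch
# model expects, one regex substitution per key. The rewrite in isolation,
# applied to invented checkpoint keys rather than a real state dict:
import re

state = {
    "encoder/layers_0/attention/query": 0,
    "decoder/layers_2/mlp/wi": 1,
}
renamed = {re.sub(r"layers_(\d+)", r"block/\1/layer", key): value for key, value in state.items()}
print(sorted(renamed))
# ['decoder/block/2/layer/mlp/wi', 'encoder/block/0/layer/attention/query']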
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = MvpTokenizer SCREAMING_SNAKE_CASE__ : str = MvpTokenizerFast SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Dict = filter_roberta_detectors def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Dict = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase : Any = dict(zip(snake_case , range(len(snake_case ) ) ) ) UpperCAmelCase : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase : Any = {"unk_token": "<unk>"} UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case ) ) def A_ ( self , **snake_case ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , **snake_case ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def A_ ( self ): '''simple docstring''' return MvpTokenizer.from_pretrained("RUCAIBox/mvp" ) @cached_property def A_ ( self ): '''simple docstring''' return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" ) @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCAmelCase : Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[str] = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="pt" ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(snake_case , snake_case ) # Test that special tokens are reset @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : List[str] = tokenizer(snake_case , padding=snake_case , return_tensors="pt" ) # check if input_ids are returned and no labels self.assertIn("input_ids" , snake_case ) self.assertIn("attention_mask" , snake_case ) self.assertNotIn("labels" , snake_case ) self.assertNotIn("decoder_attention_mask" , snake_case ) @require_torch def A_ ( self ): 
'''simple docstring''' UpperCAmelCase : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : Dict = tokenizer(text_target=snake_case , max_length=3_2 , padding="max_length" , return_tensors="pt" ) self.assertEqual(3_2 , targets["input_ids"].shape[1] ) @require_torch def A_ ( self ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : str = tokenizer( ["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=snake_case , truncation=snake_case , return_tensors="pt" ) self.assertIsInstance(snake_case , snake_case ) self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) ) @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = ["A long paragraph for summarization."] UpperCAmelCase : Tuple = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase : str = tokenizer(snake_case , text_target=snake_case , return_tensors="pt" ) UpperCAmelCase : Any = inputs["input_ids"] UpperCAmelCase : Any = inputs["labels"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(snake_case , **snake_case ) UpperCAmelCase : Optional[int] = "A, <mask> AllenNLP sentence." UpperCAmelCase : Union[str, Any] = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case ) UpperCAmelCase : Optional[Any] = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) UpperCAmelCase : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) UpperCAmelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
311
'''simple docstring''' import argparse from collections import defaultdict import yaml a : str = "docs/source/en/_toctree.yml" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = defaultdict(__magic_name__ ) for doc in model_doc: counts[doc["local"]] += 1 UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] UpperCAmelCase : Dict = [] for duplicate_key in duplicates: UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(__magic_name__ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() ) def lowercase ( __magic_name__=False ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" ) as f: UpperCAmelCase : Any = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"] # Then to the model doc UpperCAmelCase : Any = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 UpperCAmelCase : str = api_doc[model_idx]["sections"] UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section] UpperCAmelCase : Optional[int] = False for idx, modality_doc in modalities_docs: UpperCAmelCase : int = modality_doc["sections"] UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ ) if old_modality_doc != new_modality_doc: UpperCAmelCase : int = True if overwrite: UpperCAmelCase : Dict = new_modality_doc if diff: if overwrite: UpperCAmelCase : Any = model_doc UpperCAmelCase : Any = api_doc with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") a : Optional[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
311
1
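# The _toctree.yml checker above counts how often each "local" page key
# appears, keeps a single entry per duplicated key, then sorts the section by
# lower-cased title. The same two-step cleanup on a toy entry list:
from collections import defaultdict

docs = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate to collapse
]

counts = defaultdict(int)
for doc in docs:
    counts[doc["local"]] += 1

# One entry per duplicated key, then every key that was already unique.
new_doc = [{"local": key, "title": [d["title"] for d in docs if d["local"] == key][0]}
           for key, value in counts.items() if value > 1]
new_doc.extend(doc for doc in docs if counts[doc["local"]] == 1)
print(sorted(new_doc, key=lambda s: s["title"].lower()))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]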
'''simple docstring''' from collections.abc import Callable import numpy as np def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = int(np.ceil((x_end - xa) / step_size ) ) UpperCAmelCase : Optional[int] = np.zeros((n + 1,) ) UpperCAmelCase : List[str] = ya UpperCAmelCase : List[str] = xa for k in range(__magic_name__ ): UpperCAmelCase : Optional[Any] = y[k] + step_size * ode_func(__magic_name__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
311
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def lowercase ( __magic_name__ ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase : Any = False def lowercase ( ): '''simple docstring''' UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = plt.imshow(__magic_name__ ) fig.axes.get_xaxis().set_visible(__magic_name__ ) fig.axes.get_yaxis().set_visible(__magic_name__ ) plt.show() def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = datetime.now() UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" ) return timestamp
311
1
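# The first file in this row is the explicit (forward) Euler method: from
# (x0, y0) it repeatedly steps y[k+1] = y[k] + h * f(x[k], y[k]). A standalone
# restatement of the same algorithm plus a sanity check against a known
# solution: y' = y with y(0) = 1 is exactly e**x, so the estimate at x = 1
# should approach e from below as the step shrinks.
import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

approx = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.001, x_end=1.0)
print(approx[-1])  # ~2.7169, converging to exp(1) ~ 2.7183 as step_size shrinks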
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): 
"""simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ["sentencepiece"] def 
__init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ["sentencepiece"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["sentencepiece"] )
311
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def 
lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
1
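# The wall of identical classes above is the "dummy object" pattern: when an
# optional backend such as sentencepiece is missing, every class that needs it
# is replaced by a stub whose constructor raises a clear install hint through
# requires_backends, so plain `from transformers import X` keeps working.
# A stripped-down sketch of the failure path (the real helper probes importlib
# for each backend; this version fails unconditionally to show the message):
def requires_backends(obj, backends):
    name = obj.__class__.__name__
    raise ImportError(
        f"{name} requires the following backends: {', '.join(backends)}. "
        f"Try: pip install {' '.join(backends)}"
    )

class XLMRobertaTokenizer:  # stand-in for any sentencepiece-backed class
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

try:
    XLMRobertaTokenizer()
except ImportError as err:
    print(err)  # a readable hint instead of an opaque ModuleNotFoundError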
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : List[Any] = { "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json", # See all CANINE models at https://huggingface.co/models?filter=canine } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "canine" def __init__( self , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_6_3_8_4 , snake_case=1_6 , snake_case=0.02 , snake_case=1e-12 , snake_case=0 , snake_case=0xE_000 , snake_case=0xE_001 , snake_case=4 , snake_case=4 , snake_case=8 , snake_case=1_6_3_8_4 , snake_case=1_2_8 , **snake_case , ): '''simple docstring''' super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) UpperCAmelCase : Dict = max_position_embeddings UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : int = num_hidden_layers UpperCAmelCase : Optional[Any] = num_attention_heads UpperCAmelCase : Tuple = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : List[Any] = hidden_dropout_prob UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase : Dict = initializer_range UpperCAmelCase : List[str] = type_vocab_size UpperCAmelCase : Union[str, Any] = layer_norm_eps # Character config: UpperCAmelCase : List[str] = downsampling_rate UpperCAmelCase : int = upsampling_kernel_size UpperCAmelCase : str = num_hash_functions UpperCAmelCase : List[Any] = num_hash_buckets UpperCAmelCase : Tuple = local_transformer_stride
311
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
311
1
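# The MusicGen converter above splits fairseq's fused attention projection
# ("in_proj_weight", a single (3 * hidden, hidden) matrix) into separate
# query/key/value weights by slicing along dim 0. The slicing on its own,
# with a toy hidden size instead of the real 1024/1536/2048:
import torch

hidden_size = 4
fused = torch.randn(3 * hidden_size, hidden_size)  # rows packed as q, then k, then v

q_proj = fused[:hidden_size, :]
k_proj = fused[hidden_size : 2 * hidden_size, :]
v_proj = fused[-hidden_size:, :]

assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), fused)
print(q_proj.shape, k_proj.shape, v_proj.shape)  # three (4, 4) matrices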
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 a : Tuple = get_tests_dir("fixtures") class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = mock.Mock() UpperCAmelCase : Optional[Any] = 5_0_0 UpperCAmelCase : List[Any] = {} UpperCAmelCase : int = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=snake_case ) as mock_head: UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @classmethod def A_ ( cls ): '''simple docstring''' UpperCAmelCase : int = TOKEN HfFolder.save_token(snake_case ) @classmethod def A_ ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(snake_case ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case , getattr(snake_case , snake_case ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( snake_case , repo_id="test-feature-extractor" , push_to_hub=snake_case , use_auth_token=self._token ) UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case , getattr(snake_case , snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(snake_case ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) UpperCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case , getattr(snake_case , snake_case ) ) # Reset repo delete_repo(token=self._token , 
repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( snake_case , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=snake_case , use_auth_token=self._token ) UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(snake_case , getattr(snake_case , snake_case ) ) def A_ ( self ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(snake_case ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=snake_case ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
311
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 
2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 
3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a : Union[str, Any] = logging.get_logger(__name__) a : List[Any] = {"vocab_file": "spiece.model"} a : Optional[Any] = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } a : Union[str, Any] = { "albert-base-v1": 5_12, "albert-large-v1": 5_12, "albert-xlarge-v1": 5_12, "albert-xxlarge-v1": 5_12, "albert-base-v2": 5_12, "albert-large-v2": 5_12, "albert-xlarge-v2": 5_12, "albert-xxlarge-v2": 5_12, } a : Optional[Any] = "▁" class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case , snake_case=True , snake_case=True , snake_case=False , snake_case="[CLS]" , snake_case="[SEP]" , snake_case="<unk>" , snake_case="[SEP]" , snake_case="<pad>" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : int = ( AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case , normalized=snake_case ) if isinstance(snake_case , snake_case ) else mask_token ) UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) UpperCAmelCase : Union[str, Any] = do_lower_case UpperCAmelCase : Dict = remove_space UpperCAmelCase : Any = keep_accents UpperCAmelCase : int = vocab_file UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case ) @property def A_ ( self ): '''simple docstring''' return len(self.sp_model ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' UpperCAmelCase : List[str] = self.__dict__.copy() UpperCAmelCase : Dict = None return state def __setstate__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCAmelCase : int = {} UpperCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A_ ( self , snake_case ): '''simple docstring''' if self.remove_space: 
UpperCAmelCase : Optional[int] = " ".join(inputs.strip().split() ) else: UpperCAmelCase : Union[str, Any] = inputs UpperCAmelCase : Optional[int] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: UpperCAmelCase : Union[str, Any] = unicodedata.normalize("NFKD" , snake_case ) UpperCAmelCase : str = "".join([c for c in outputs if not unicodedata.combining(snake_case )] ) if self.do_lower_case: UpperCAmelCase : int = outputs.lower() return outputs def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.preprocess_text(snake_case ) UpperCAmelCase : Tuple = self.sp_model.encode(snake_case , out_type=snake_case ) UpperCAmelCase : List[Any] = [] for piece in pieces: if len(snake_case ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): UpperCAmelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: UpperCAmelCase : str = cur_pieces[1:] else: UpperCAmelCase : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(snake_case ) else: new_pieces.append(snake_case ) return new_pieces def A_ ( self , snake_case ): '''simple docstring''' return self.sp_model.PieceToId(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : int = [] UpperCAmelCase : Dict = "" UpperCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case ) + token UpperCAmelCase : List[str] = True UpperCAmelCase : int = [] else: current_sub_tokens.append(snake_case ) UpperCAmelCase : Union[str, Any] = False out_string += self.sp_model.decode(snake_case ) return out_string.strip() def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Dict = [self.sep_token_id] UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def A_ ( self , snake_case , snake_case = None , snake_case = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is not None: return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1] return [1] + ([0] * len(snake_case )) + [1] def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : str = [self.sep_token_id] UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' if not os.path.isdir(snake_case ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase : int = os.path.join( snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , "wb" ) as fi: UpperCAmelCase : Dict = 
self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
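# Quick usage sketch of the tokenizer defined above (downloads the
# "albert-base-v2" SentencePiece model listed in the vocab map).
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello, world!", "A second segment.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# token_type_ids mark the [CLS] A [SEP] / B [SEP] split built by
# create_token_type_ids_from_sequences above.
print(encoded["token_type_ids"])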
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number > 0: raise ValueError("input must be a negative integer" ) UpperCAmelCase : List[Any] = len(bin(__magic_name__ )[3:] ) UpperCAmelCase : Optional[Any] = bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase : Tuple = ( ( "1" + "0" * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else "0" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
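# Worked example for the converter above, taking number = -5:
#   bin(-5)[3:]          -> "101", so binary_number_length = 3
#   bin(abs(-5) - 8)[3:] -> "11",  the low complement digits
#   "1" + "0" * (3 - 2) + "11" -> "1011"
# and 0b1011 is -5 in 4-bit two's complement, so the result is "0b1011".
print(format((1 << 4) - 5, "b"))  # 1011, the same 4-bit pattern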
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=sys.maxsize ): '''simple docstring''' UpperCAmelCase : Tuple = "bilinear" UpperCAmelCase : str = max_size UpperCAmelCase : str = short_edge_length def __call__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = [] for img in imgs: UpperCAmelCase , UpperCAmelCase : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase : Optional[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase : Tuple = size * 1.0 / min(snake_case , snake_case ) if h < w: UpperCAmelCase , UpperCAmelCase : List[Any] = size, scale * w else: UpperCAmelCase , UpperCAmelCase : Any = scale * h, size if max(snake_case , snake_case ) > self.max_size: UpperCAmelCase : int = self.max_size * 1.0 / max(snake_case , snake_case ) UpperCAmelCase : int = newh * scale UpperCAmelCase : Any = neww * scale UpperCAmelCase : Tuple = int(neww + 0.5 ) UpperCAmelCase : str = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase : Union[str, Any] = Image.fromarray(snake_case ) UpperCAmelCase : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase : Tuple = np.asarray(snake_case ) else: UpperCAmelCase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase : Union[str, Any] = nn.functional.interpolate( snake_case , (newh, neww) , mode=self.interp_method , align_corners=snake_case ).squeeze(0 ) img_augs.append(snake_case ) return img_augs class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase : Any = cfg.INPUT.FORMAT UpperCAmelCase : List[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase : str = cfg.PAD_VALUE UpperCAmelCase : Tuple = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase : int = cfg.MODEL.DEVICE UpperCAmelCase : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase : Dict = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase : List[str] = lambda snake_case : (x - self.pixel_mean) / self.pixel_std def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = tuple(max(snake_case ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase : List[str] = [im.shape[-2:] for im in images] UpperCAmelCase : Union[str, Any] = [ nn.functional.pad( snake_case , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case , snake_case ) ] return torch.stack(snake_case ), torch.tensor(snake_case ) def __call__( self , snake_case , snake_case=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case , snake_case ): UpperCAmelCase : Optional[Any] = [images] if single_image: assert len(snake_case ) == 1 for i in range(len(snake_case ) ): if isinstance(images[i] , torch.Tensor ): images.insert(snake_case , images.pop(snake_case ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case , 
torch.as_tensor(img_tensorize(images.pop(snake_case ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase : Dict = self.aug(snake_case ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase : int = [self.normalizer(snake_case ) for x in images] # now pad them to do the following operations UpperCAmelCase , UpperCAmelCase : int = self.pad(snake_case ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase : List[str] = torch.true_divide(snake_case , snake_case ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' assert torch.isfinite(__magic_name__ ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase , UpperCAmelCase : List[Any] = box_size tensor[:, 0].clamp_(min=0 , max=__magic_name__ ) tensor[:, 1].clamp_(min=0 , max=__magic_name__ ) tensor[:, 2].clamp_(min=0 , max=__magic_name__ ) tensor[:, 3].clamp_(min=0 , max=__magic_name__ )
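# Numeric sketch of the resize rule in ResizeShortestEdge above: the shorter
# side is scaled to `size` and the longer side is capped at `max_size`.
def target_hw(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        cap = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

print(target_hw(480, 640, 800, 1333))   # (800, 1067): short side hits 800
print(target_hw(480, 1920, 800, 1333))  # (333, 1333): long side capped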
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
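# Standalone sketch of the same distance-and-majority-vote rule on toy 1-D
# points, independent of the iris arrays loaded above.
from collections import Counter

train = [([0.0], "a"), ([1.0], "a"), ([10.0], "b")]
query, k = [2.0], 2
nearest = sorted(train, key=lambda p: abs(p[0][0] - query[0]))[:k]
print(Counter(label for _, label in nearest).most_common(1)[0][0])  # "a"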
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a : List[str] = logging.get_logger(__name__) # General docstring a : str = "MobileNetV1Config" # Base docstring a : Any = "google/mobilenet_v1_1.0_224" a : Dict = [1, 10_24, 7, 7] # Image classification docstring a : Dict = "google/mobilenet_v1_1.0_224" a : Tuple = "tabby, tabby cat" a : List[str] = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = {} if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : str = model.mobilenet_va else: UpperCAmelCase : List[Any] = model UpperCAmelCase : Any = "MobilenetV1/Conv2d_0/" UpperCAmelCase : Dict = backbone.conv_stem.convolution.weight UpperCAmelCase : Optional[int] = backbone.conv_stem.normalization.bias UpperCAmelCase : List[Any] = backbone.conv_stem.normalization.weight UpperCAmelCase : Any = backbone.conv_stem.normalization.running_mean UpperCAmelCase : Union[str, Any] = backbone.conv_stem.normalization.running_var for i in range(13 ): UpperCAmelCase : List[str] = i + 1 UpperCAmelCase : int = i * 2 UpperCAmelCase : Union[str, Any] = backbone.layer[pt_index] UpperCAmelCase : Optional[Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" UpperCAmelCase : int = pointer.convolution.weight UpperCAmelCase : List[Any] = pointer.normalization.bias UpperCAmelCase : Tuple = pointer.normalization.weight UpperCAmelCase : Optional[int] = pointer.normalization.running_mean UpperCAmelCase : int = pointer.normalization.running_var UpperCAmelCase : Optional[Any] = backbone.layer[pt_index + 1] UpperCAmelCase : Dict = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" UpperCAmelCase : Dict = pointer.convolution.weight UpperCAmelCase : str = pointer.normalization.bias UpperCAmelCase : Optional[Any] = pointer.normalization.weight UpperCAmelCase : Optional[Any] = pointer.normalization.running_mean UpperCAmelCase : Tuple = pointer.normalization.running_var if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : Dict = "MobilenetV1/Logits/Conv2d_1c_1x1/" UpperCAmelCase : Any = model.classifier.weight UpperCAmelCase : Union[str, Any] = model.classifier.bias return tf_to_pt_map def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model UpperCAmelCase : Tuple = tf.train.list_variables(__magic_name__ ) UpperCAmelCase : List[Any] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) UpperCAmelCase : List[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = array # Build TF to PyTorch weights loading map UpperCAmelCase : Union[str, Any] = _build_tf_to_pytorch_map(__magic_name__ , __magic_name__ , __magic_name__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue UpperCAmelCase : List[Any] = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) UpperCAmelCase : List[str] = np.transpose(__magic_name__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer UpperCAmelCase : List[Any] = array.squeeze().transpose() else: UpperCAmelCase : List[Any] = np.transpose(__magic_name__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) UpperCAmelCase : List[str] = torch.from_numpy(__magic_name__ ) tf_weights.pop(__magic_name__ , __magic_name__ ) tf_weights.pop(name + "/RMSProp" , __magic_name__ ) tf_weights.pop(name + "/RMSProp_1" , __magic_name__ ) tf_weights.pop(name + "/ExponentialMovingAverage" , __magic_name__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any = features.shape[-2:] UpperCAmelCase , UpperCAmelCase : Union[str, Any] = conv_layer.stride UpperCAmelCase , UpperCAmelCase : Dict = conv_layer.kernel_size if in_height % stride_height == 0: UpperCAmelCase : List[Any] = max(kernel_height - stride_height , 0 ) else: UpperCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: UpperCAmelCase : str = max(kernel_width - stride_width , 0 ) else: UpperCAmelCase : Dict = max(kernel_width - (in_width % stride_width) , 0 ) UpperCAmelCase : List[Any] = pad_along_width // 2 UpperCAmelCase : Tuple = pad_along_width - pad_left UpperCAmelCase : Dict = pad_along_height // 2 UpperCAmelCase : Any = pad_along_height - pad_top UpperCAmelCase : List[str] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__magic_name__ , __magic_name__ , "constant" , 0.0 ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1 , snake_case = 1 , snake_case = False , snake_case = True , snake_case = True , ): '''simple docstring''' super().__init__() UpperCAmelCase : Union[str, Any] = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." 
) UpperCAmelCase : List[str] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) UpperCAmelCase : Any = nn.Convad( in_channels=snake_case , out_channels=snake_case , kernel_size=snake_case , stride=snake_case , padding=snake_case , groups=snake_case , bias=snake_case , padding_mode="zeros" , ) if use_normalization: UpperCAmelCase : Tuple = nn.BatchNormad( num_features=snake_case , eps=config.layer_norm_eps , momentum=0.9997 , affine=snake_case , track_running_stats=snake_case , ) else: UpperCAmelCase : Tuple = None if use_activation: if isinstance(snake_case , snake_case ): UpperCAmelCase : List[Any] = ACTaFN[use_activation] elif isinstance(config.hidden_act , snake_case ): UpperCAmelCase : Optional[int] = ACTaFN[config.hidden_act] else: UpperCAmelCase : Optional[int] = config.hidden_act else: UpperCAmelCase : str = None def A_ ( self , snake_case ): '''simple docstring''' if self.config.tf_padding: UpperCAmelCase : Union[str, Any] = apply_tf_padding(snake_case , self.convolution ) UpperCAmelCase : Tuple = self.convolution(snake_case ) if self.normalization is not None: UpperCAmelCase : List[str] = self.normalization(snake_case ) if self.activation is not None: UpperCAmelCase : str = self.activation(snake_case ) return features class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaConfig SCREAMING_SNAKE_CASE__ : Optional[Any] = load_tf_weights_in_mobilenet_va SCREAMING_SNAKE_CASE__ : List[Any] = "mobilenet_v1" SCREAMING_SNAKE_CASE__ : int = "pixel_values" SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self , snake_case ): '''simple docstring''' if isinstance(snake_case , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) a : str = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" a : Union[str, Any] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowercase__ , ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = True ): '''simple docstring''' super().__init__(snake_case ) UpperCAmelCase : Tuple = config UpperCAmelCase : int = 3_2 UpperCAmelCase : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) UpperCAmelCase : List[str] = MobileNetVaConvLayer( snake_case , in_channels=config.num_channels , out_channels=snake_case , kernel_size=3 , stride=2 , ) UpperCAmelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] UpperCAmelCase : Any = nn.ModuleList() for i in range(1_3 ): UpperCAmelCase : List[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 UpperCAmelCase : Any = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=3 , stride=strides[i] , groups=snake_case , ) ) self.layer.append( MobileNetVaConvLayer( snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=1 , ) ) UpperCAmelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def A_ ( self , snake_case ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A_ ( self , snake_case = None , snake_case = None , snake_case = None , ): '''simple docstring''' UpperCAmelCase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) UpperCAmelCase : Tuple = self.conv_stem(snake_case ) UpperCAmelCase : str = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): UpperCAmelCase : Union[str, Any] = layer_module(snake_case ) if output_hidden_states: UpperCAmelCase : int = all_hidden_states + (hidden_states,) UpperCAmelCase : Union[str, Any] = hidden_states if self.pooler is not None: UpperCAmelCase : int = torch.flatten(self.pooler(snake_case ) , start_dim=1 ) else: UpperCAmelCase : List[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=snake_case , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowercase__ , ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__(snake_case ) UpperCAmelCase : Optional[Any] = config.num_labels UpperCAmelCase : Any = MobileNetVaModel(snake_case ) UpperCAmelCase : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head UpperCAmelCase : int = nn.Dropout(config.classifier_dropout_prob , inplace=snake_case ) UpperCAmelCase : str = nn.Linear(snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A_ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase : int = self.mobilenet_va(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) UpperCAmelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase : Dict = self.classifier(self.dropout(snake_case ) ) UpperCAmelCase : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase : List[Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase : Dict = "single_label_classification" else: UpperCAmelCase : Optional[int] = "multi_label_classification" if self.config.problem_type == "regression": UpperCAmelCase : Union[str, Any] = MSELoss() if self.num_labels == 1: UpperCAmelCase : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase : Tuple = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase : str = CrossEntropyLoss() UpperCAmelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase : Union[str, Any] = BCEWithLogitsLoss() UpperCAmelCase : Optional[Any] = loss_fct(snake_case , snake_case ) if not return_dict: UpperCAmelCase : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states , )
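# Inference sketch for the model classes above, using the documented
# checkpoint "google/mobilenet_v1_1.0_224" and a COCO test image (both
# downloaded at run time).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"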
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("number must not be negative" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
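# Why `number & (number - 1) == 0` works: a power of two has exactly one set
# bit, and subtracting 1 turns that bit off and every lower bit on, so the
# AND clears everything. (The expression is also 0 for number == 0, which the
# function above therefore reports as a power of two.)
#   8     = 0b1000
#   8 - 1 = 0b0111
#   8 & 7 = 0b0000
print([n for n in range(1, 20) if n & (n - 1) == 0])  # [1, 2, 4, 8, 16]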
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
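# Demonstration of the shift-right helper above on a toy batch: decoder inputs
# start with the decoder start token (0 here) followed by the labels shifted
# one position right, with label padding (-100) mapped back to pad_token_id=1.
import jax.numpy as jnp

labels = jnp.array([[5, 6, -100, -100]])
shifted = jnp.zeros_like(labels).at[:, 1:].set(labels[:, :-1]).at[:, 0].set(0)
shifted = jnp.where(shifted == -100, 1, shifted)
print(shifted)  # [[0 5 6 1]]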
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
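# Standalone sketch of one schedule checked above: linear warmup over 2 steps
# to the base learning rate, then linear decay to zero over the remaining
# steps (imports match the deprecated `transformers.AdamW` used by the tests).
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]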
'''simple docstring''' import argparse a : List[str] = "docs/source/_static/js/custom.js" def lowercase ( __magic_name__ ): '''simple docstring''' with open(__magic_name__ , encoding="utf-8" , newline="\n" ) as f: UpperCAmelCase : Optional[Any] = f.readlines() UpperCAmelCase : Any = 0 # First let's put the right version while not lines[index].startswith("const stableVersion =" ): index += 1 UpperCAmelCase : Any = F"const stableVersion = \"v{version}\"\n" # Then update the dictionary while not lines[index].startswith("const versionMapping = {" ): index += 1 # We go until the end while not lines[index].startswith("}" ): index += 1 # We add the new version at the end lines[index - 1] += F" \"v{version}\": \"v{version}\",\n" with open(__magic_name__ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(__magic_name__ ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") a : Optional[int] = parser.parse_args() update_custom_js(args.version)
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowercase ( __magic_name__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = np.max(_outputs , axis=-1 , keepdims=__magic_name__ ) UpperCAmelCase : List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__magic_name__ ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "sigmoid" SCREAMING_SNAKE_CASE__ : List[Any] = "softmax" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "none" @add_end_docstrings( lowercase__ , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Optional[int] = ClassificationFunction.NONE def __init__( self , **snake_case ): '''simple docstring''' super().__init__(**snake_case ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def A_ ( self , snake_case=None , snake_case=None , snake_case="" , **snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = tokenizer_kwargs UpperCAmelCase : Dict = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: UpperCAmelCase : int = self.model.config.return_all_scores if isinstance(snake_case , snake_case ) or top_k is None: UpperCAmelCase : int = top_k UpperCAmelCase : Optional[Any] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , snake_case , ) if return_all_scores: UpperCAmelCase : Optional[Any] = None else: UpperCAmelCase : int = 1 if isinstance(snake_case , snake_case ): UpperCAmelCase : int = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: UpperCAmelCase : Optional[int] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = super().__call__(*snake_case , **snake_case ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
UpperCAmelCase : str = "top_k" not in kwargs if isinstance(args[0] , snake_case ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def A_ ( self , snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : Any = self.framework if isinstance(snake_case , snake_case ): return self.tokenizer(**snake_case , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ) and len(snake_case ) == 1 and isinstance(inputs[0] , snake_case ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(snake_case , return_tensors=snake_case , **snake_case ) def A_ ( self , snake_case ): '''simple docstring''' return self.model(**snake_case ) def A_ ( self , snake_case , snake_case=None , snake_case=1 , snake_case=True ): '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: UpperCAmelCase : int = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: UpperCAmelCase : Union[str, Any] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: UpperCAmelCase : int = self.model.config.function_to_apply else: UpperCAmelCase : Union[str, Any] = ClassificationFunction.NONE UpperCAmelCase : Union[str, Any] = model_outputs["logits"][0] UpperCAmelCase : Any = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: UpperCAmelCase : Union[str, Any] = sigmoid(snake_case ) elif function_to_apply == ClassificationFunction.SOFTMAX: UpperCAmelCase : List[Any] = softmax(snake_case ) elif function_to_apply == ClassificationFunction.NONE: UpperCAmelCase : List[str] = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} UpperCAmelCase : List[Any] = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(snake_case ) ] if not _legacy: dict_scores.sort(key=lambda snake_case : x["score"] , reverse=snake_case ) if top_k is not None: UpperCAmelCase : Union[str, Any] = dict_scores[:top_k] return dict_scores
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + 
measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
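# The WER metric above delegates to jiwer.compute_measures; the formula
# WER = (S + D + I) / N is a word-level Levenshtein distance divided by the
# reference length. A self-contained sketch, independent of jiwer:
def word_edit_distance(reference: str, hypothesis: str) -> int:
    """Minimum substitutions + deletions + insertions between word sequences."""
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # delete every reference word
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # insert every hypothesis word
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dp[i][j] = min(substitution, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[-1][-1]

pairs = [
    ("this is the reference", "this is the prediction"),   # 1 substitution
    ("there is another one", "there is an other sample"),  # 3 errors
]
errors = sum(word_edit_distance(ref, hyp) for ref, hyp in pairs)
total = sum(len(ref.split()) for ref, _ in pairs)
print(errors / total)  # 0.5, matching the doctest in the metric description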
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined Harris constant, expected to be 0.04 or 0.06
        window_size: side length of the neighbourhood window
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured constant rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                if r > 0.5:  # response threshold; can be tuned
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
311
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n, by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoised count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if all elements of the list are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers, each with n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Count the distinct prime factors of each member, then append the
        # target count so `equality` checks all of them against n at once.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first such run."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
311
1
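# Context for the Euler-47 style search above: the first two consecutive
# integers with two distinct prime factors each are 14 (2 x 7) and 15 (3 x 5),
# and the first such triple is 644, 645, 646. A quick standalone check with
# the same trial-division factorisation:
def distinct_prime_factors(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

assert distinct_prime_factors(14) == {2, 7}
assert distinct_prime_factors(15) == {3, 5}
# 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19
assert all(len(distinct_prime_factors(x)) == 3 for x in (644, 645, 646))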
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() a : Any = 2 class UpperCamelCase__ : """simple docstring""" def __init__( self , *, # begin keyword-only arguments snake_case="<s>" , snake_case="<pad>" , snake_case="</s>" , snake_case="<unk>" , snake_case=None , ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = bos, unk, pad, eos UpperCAmelCase : Optional[Any] = [] UpperCAmelCase : Dict = [] UpperCAmelCase : Any = {} UpperCAmelCase : Optional[Any] = self.add_symbol(snake_case ) UpperCAmelCase : Dict = self.add_symbol(snake_case ) UpperCAmelCase : Any = self.add_symbol(snake_case ) UpperCAmelCase : Tuple = self.add_symbol(snake_case ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(snake_case ) UpperCAmelCase : str = len(self.symbols ) def __eq__( self , snake_case ): '''simple docstring''' return self.indices == other.indices def __getitem__( self , snake_case ): '''simple docstring''' if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): '''simple docstring''' return len(self.symbols ) def __contains__( self , snake_case ): '''simple docstring''' return sym in self.indices @classmethod def A_ ( cls , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = cls() d.add_from_file(snake_case ) return d def A_ ( self , snake_case , snake_case=1 , snake_case=False ): '''simple docstring''' if word in self.indices and not overwrite: UpperCAmelCase : Dict = self.indices[word] UpperCAmelCase : List[Any] = self.count[idx] + n return idx else: UpperCAmelCase : Optional[int] = len(self.symbols ) UpperCAmelCase : Union[str, Any] = idx self.symbols.append(snake_case ) self.count.append(snake_case ) return idx def A_ ( self , snake_case ): '''simple docstring''' return 0 def A_ ( self , snake_case ): '''simple docstring''' if isinstance(snake_case , snake_case ): try: with open(snake_case , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(snake_case ) ) return UpperCAmelCase : List[str] = f.readlines() UpperCAmelCase : Dict = self._load_meta(snake_case ) for line in lines[indices_start_line:]: try: UpperCAmelCase , UpperCAmelCase : List[Any] = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": UpperCAmelCase : Union[str, Any] = True UpperCAmelCase , UpperCAmelCase : List[Any] = line.rsplit(" " , 1 ) else: UpperCAmelCase : Dict = False UpperCAmelCase : Dict = int(snake_case ) UpperCAmelCase : Optional[Any] = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(snake_case ) ) self.add_symbol(snake_case , n=snake_case , overwrite=snake_case ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = dict((re.sub(R"@@$" , "" , __magic_name__ ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , __magic_name__ ), v) for k, v in d.items() ) UpperCAmelCase : Optional[int] = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"{k}</w>"] UpperCAmelCase : List[str] = d[k] # restore return da def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if not os.path.exists(__magic_name__ ): raise ValueError(F"path {biogpt_checkpoint_path} does not exist!" ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) print(F"Writing results to {pytorch_dump_folder_path}" ) # handle various types of models UpperCAmelCase : int = os.path.join(__magic_name__ , "checkpoint.pt" ) if not os.path.isfile(__magic_name__ ): raise ValueError(F"path to the file {checkpoint_file} does not exist!" ) UpperCAmelCase : Optional[int] = torch.load(__magic_name__ , map_location="cpu" ) UpperCAmelCase : int = chkpt["cfg"]["model"] # dicts UpperCAmelCase : Dict = os.path.join(__magic_name__ , "dict.txt" ) if not os.path.isfile(__magic_name__ ): raise ValueError(F"path to the file {dict_file} does not exist!" ) UpperCAmelCase : Optional[int] = Dictionary.load(__magic_name__ ) UpperCAmelCase : Any = rewrite_dict_keys(src_dict.indices ) UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[Any] = os.path.join(__magic_name__ , VOCAB_FILES_NAMES["vocab_file"] ) print(F"Generating {src_vocab_file} of {src_vocab_size} records" ) with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__magic_name__ , ensure_ascii=__magic_name__ , indent=__magic_name__ ) ) # merges_file (bpecodes) UpperCAmelCase : Optional[int] = os.path.join(__magic_name__ , "bpecodes" ) if not os.path.isfile(__magic_name__ ): raise ValueError(F"path to the file {bpecodes_file} does not exist!" 
) UpperCAmelCase : Optional[int] = os.path.join(__magic_name__ , VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(__magic_name__ , __magic_name__ ) # model config UpperCAmelCase : Dict = os.path.join(__magic_name__ , "config.json" ) UpperCAmelCase : Tuple = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.0_2, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1e-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"Generating {biogpt_model_config_file}" ) with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__magic_name__ , ensure_ascii=__magic_name__ , indent=__magic_name__ ) ) # tokenizer config UpperCAmelCase : str = os.path.join(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"Generating {biogpt_tokenizer_config_file}" ) with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__magic_name__ , ensure_ascii=__magic_name__ , indent=__magic_name__ ) ) # model UpperCAmelCase : Optional[Any] = chkpt["model"] # remove unneeded keys UpperCAmelCase : Any = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(__magic_name__ , __magic_name__ ) UpperCAmelCase : Tuple = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): UpperCAmelCase : Optional[Any] = model_state_dict.pop(__magic_name__ ) else: UpperCAmelCase : List[str] = model_state_dict.pop(__magic_name__ ) UpperCAmelCase : int = BioGptConfig.from_pretrained(__magic_name__ ) UpperCAmelCase : str = BioGptForCausalLM(__magic_name__ ) # check that it loads ok model_new.load_state_dict(__magic_name__ ) # save UpperCAmelCase : Tuple = os.path.join(__magic_name__ , __magic_name__ ) print(F"Generating {pytorch_weights_dump_path}" ) torch.save(__magic_name__ , __magic_name__ ) print("Conversion is done!" ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--biogpt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) a : Tuple = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
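# The encodec __init__ above defers heavy torch imports through transformers'
# _LazyModule. A minimal sketch of the same deferred-import idea using PEP 562
# module-level __getattr__ inside a package's __init__.py; this illustrates
# the pattern only and is not the actual _LazyModule implementation.
import importlib

# Maps a public attribute name to the submodule that defines it.
_import_structure = {
    "EncodecConfig": ".configuration_encodec",
    "EncodecModel": ".modeling_encodec",
}

def __getattr__(name: str):
    # Python calls this only when `name` is missing from the module namespace,
    # so each submodule is imported at most once, on first access.
    if name in _import_structure:
        module = importlib.import_module(_import_structure[name], __package__)
        value = getattr(module, name)
        globals()[name] = value  # cache for subsequent lookups
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")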
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=6_4 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=[1, 1_6, 4, 4] , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Tuple = parent UpperCAmelCase : Union[str, Any] = batch_size UpperCAmelCase : int = image_size UpperCAmelCase : Dict = patch_size UpperCAmelCase : Dict = num_channels UpperCAmelCase : str = is_training UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : List[Any] = intermediate_size UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = type_sequence_label_size UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[int] = scope UpperCAmelCase : Dict = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size UpperCAmelCase : Tuple = (self.image_size // 3_2) ** 2 UpperCAmelCase : List[str] = num_patches + 1 def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Optional[int] = None if self.use_labels: UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Dict = self.get_config() return config, pixel_values, labels def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 1_6, 3_2], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , 
backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=snake_case , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = ViTHybridModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Dict = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.type_sequence_label_size UpperCAmelCase : Tuple = ViTHybridForImageClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = config_and_inputs UpperCAmelCase : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : int = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ViTHybridModelTester(self ) UpperCAmelCase : int = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[Any] = _config_zero_init(snake_case ) for model_class 
in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(config=snake_case ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": UpperCAmelCase : List[str] = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def A_ ( self ): '''simple docstring''' for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Optional[Any] = ViTHybridModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( snake_case ) UpperCAmelCase : List[Any] = self.default_image_processor UpperCAmelCase : Any = prepare_img() UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case ) # forward pass with torch.no_grad(): UpperCAmelCase : Optional[Any] = model(**snake_case ) # verify the logits UpperCAmelCase : Dict = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) ) @slow @require_accelerate def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" ) UpperCAmelCase : Optional[Any] = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" ) UpperCAmelCase : int = prepare_img() UpperCAmelCase : Dict = image_processor(images=snake_case , return_tensors="pt" ) UpperCAmelCase : Tuple = model(**snake_case ) UpperCAmelCase : Dict = outputs.logits # model predicts one of the 1000 ImageNet classes UpperCAmelCase : Optional[Any] = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
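# The regex pipeline in the naming utilities above converts dataset class
# names to snake_case and builds shard file names like
# name-split-00000-of-00005.suffix. A few illustrative conversions, using the
# same two regexes:
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()

assert camelcase_to_snakecase("SomeDataset") == "some_dataset"
assert camelcase_to_snakecase("HTMLParser") == "html_parser"  # acronym split first

# Shard names follow the prefix-shardid-of-numshards scheme:
prefix = "some_dataset-train"
print([f"{prefix}-{i:05d}-of-{2:05d}.arrow" for i in range(2)])
# ['some_dataset-train-00000-of-00002.arrow', 'some_dataset-train-00001-of-00002.arrow']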
'''simple docstring''' from __future__ import annotations import bisect def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ): '''simple docstring''' if hi < 0: UpperCAmelCase : int = len(__magic_name__ ) while lo < hi: UpperCAmelCase : int = lo + (hi - lo) // 2 if sorted_collection[mid] < item: UpperCAmelCase : Optional[Any] = mid + 1 else: UpperCAmelCase : List[Any] = mid return lo def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ): '''simple docstring''' if hi < 0: UpperCAmelCase : Union[str, Any] = len(__magic_name__ ) while lo < hi: UpperCAmelCase : str = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: UpperCAmelCase : List[str] = mid + 1 else: UpperCAmelCase : int = mid return lo def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ): '''simple docstring''' sorted_collection.insert(bisect_left(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , __magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = 0 , __magic_name__ = -1 ): '''simple docstring''' sorted_collection.insert(bisect_right(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , __magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 0 UpperCAmelCase : int = len(__magic_name__ ) - 1 while left <= right: UpperCAmelCase : List[Any] = left + (right - left) // 2 UpperCAmelCase : str = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: UpperCAmelCase : List[Any] = midpoint - 1 else: UpperCAmelCase : int = midpoint + 1 return None def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = bisect.bisect_left(__magic_name__ , __magic_name__ ) if index != len(__magic_name__ ) and sorted_collection[index] == item: return index return None def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' if right < left: return None UpperCAmelCase : int = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__magic_name__ , __magic_name__ , __magic_name__ , midpoint - 1 ) else: return binary_search_by_recursion(__magic_name__ , __magic_name__ , midpoint + 1 , __magic_name__ ) if __name__ == "__main__": a : Any = input("Enter numbers separated by comma:\n").strip() a : List[str] = sorted(int(item) for item in user_input.split(",")) a : Optional[int] = int(input("Enter a single number to be found in the list:\n")) a : Any = binary_search(collection, target) if result is None: print(F'{target} was not found in {collection}.') else: print(F'{target} was found at position {result} in {collection}.')
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
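# The binary-search module earlier in this row distinguishes bisect_left from
# bisect_right: on duplicates, the former returns the first admissible index,
# the latter the index past the last. A short demonstration with the standard
# library:
import bisect

data = [1, 2, 4, 4, 4, 7]

left = bisect.bisect_left(data, 4)    # 2: insertion point before the first 4
right = bisect.bisect_right(data, 4)  # 5: insertion point after the last 4
assert (left, right) == (2, 5)
assert data[left:right] == [4, 4, 4]

# Membership test via bisect_left, as in binary_search_std_lib above:
index = bisect.bisect_left(data, 4)
assert index != len(data) and data[index] == 4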
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) UpperCAmelCase : Optional[int] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(f"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() ) @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" ) UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(snake_case , env=os.environ.copy() ) if __name__ == "__main__": a : Union[str, Any] = Accelerator() a : str = (accelerator.state.process_index + 2, 10) a : List[str] = torch.randint(0, 10, shape).to(accelerator.device) a : Optional[int] = "" a : int = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
311
'''simple docstring''' import argparse import copy def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = {} with open(__magic_name__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCAmelCase : List[Any] = [] _list.append([line.split()[1], line.split()[2]] ) UpperCAmelCase : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: UpperCAmelCase : Any = [] _list.append([line.split()[0], line.split()[2]] ) UpperCAmelCase : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' with open(__magic_name__ ) as f: UpperCAmelCase : List[str] = f.read(1 ) UpperCAmelCase : List[Any] = start_node UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Any = start_node UpperCAmelCase : Optional[Any] = 0 while visiting not in first_solution: UpperCAmelCase : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution: UpperCAmelCase : Tuple = k[1] UpperCAmelCase : Dict = k[0] first_solution.append(__magic_name__ ) UpperCAmelCase : int = distance_of_first_solution + int(__magic_name__ ) UpperCAmelCase : str = best_node first_solution.append(__magic_name__ ) UpperCAmelCase : int = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCAmelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for n in solution[1:-1]: UpperCAmelCase : Any = solution.index(__magic_name__ ) for kn in solution[1:-1]: UpperCAmelCase : Dict = solution.index(__magic_name__ ) if n == kn: continue UpperCAmelCase : Tuple = copy.deepcopy(__magic_name__ ) UpperCAmelCase : Optional[int] = kn UpperCAmelCase : List[str] = n UpperCAmelCase : str = 0 for k in _tmp[:-1]: UpperCAmelCase : List[Any] = _tmp[_tmp.index(__magic_name__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCAmelCase : List[Any] = distance + int(i[1] ) _tmp.append(__magic_name__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) UpperCAmelCase : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __magic_name__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = 1 UpperCAmelCase : List[str] = first_solution UpperCAmelCase : str = [] UpperCAmelCase : Union[str, Any] = distance_of_first_solution UpperCAmelCase : Union[str, Any] = solution while count <= iters: UpperCAmelCase : int = find_neighborhood(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = 0 UpperCAmelCase : List[str] = neighborhood[index_of_best_solution] UpperCAmelCase : Dict = len(__magic_name__ ) - 1 UpperCAmelCase : Dict = False while not found: UpperCAmelCase : List[Any] = 0 while i < len(__magic_name__ ): if best_solution[i] != solution[i]: UpperCAmelCase : int = best_solution[i] UpperCAmelCase : Optional[int] = solution[i] break UpperCAmelCase : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and 
[ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) UpperCAmelCase : List[str] = True UpperCAmelCase : List[Any] = best_solution[:-1] UpperCAmelCase : str = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCAmelCase : Union[str, Any] = cost UpperCAmelCase : Tuple = solution else: UpperCAmelCase : Optional[Any] = index_of_best_solution + 1 UpperCAmelCase : str = neighborhood[index_of_best_solution] if len(__magic_name__ ) >= size: tabu_list.pop(0 ) UpperCAmelCase : int = count + 1 return best_solution_ever, best_cost def lowercase ( __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = generate_neighbours(args.File ) UpperCAmelCase , UpperCAmelCase : Any = generate_first_solution( args.File , __magic_name__ ) UpperCAmelCase , UpperCAmelCase : Any = tabu_search( __magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
311
1
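# find_neighborhood in the tabu-search script above builds candidate tours by
# swapping two interior cities and re-costing each result; the tabu list then
# blocks recently used swap pairs. A compact, simplified sketch of that 2-swap
# neighbourhood (endpoints fixed, unordered pairs instead of the script's
# ordered enumeration):
from itertools import combinations

def two_swap_neighbours(tour: list) -> list:
    neighbours = []
    for i, j in combinations(range(1, len(tour) - 1), 2):
        candidate = tour.copy()
        candidate[i], candidate[j] = candidate[j], candidate[i]
        neighbours.append(candidate)
    return neighbours

tour = ["a", "b", "c", "d", "a"]
print(two_swap_neighbours(tour))
# [['a', 'c', 'b', 'd', 'a'], ['a', 'd', 'c', 'b', 'a'], ['a', 'b', 'd', 'c', 'a']]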
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : str = field( metadata={"help": "The output directory where the model will be written."} , ) SCREAMING_SNAKE_CASE__ : str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE__ : str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) SCREAMING_SNAKE_CASE__ : Optional[str] = field( default=lowercase__ , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments,) ) ((UpperCAmelCase) , ) : Dict = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed UpperCAmelCase : int = True UpperCAmelCase : List[Any] = True UpperCAmelCase : Union[str, Any] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__magic_name__ , decoder_config=__magic_name__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens UpperCAmelCase : Any = decoder_config.decoder_start_token_id UpperCAmelCase : List[str] = decoder_config.pad_token_id if decoder_start_token_id is None: UpperCAmelCase : Optional[Any] = decoder_config.bos_token_id if pad_token_id is None: UpperCAmelCase : List[Any] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work UpperCAmelCase : Optional[Any] = decoder_config.eos_token_id UpperCAmelCase : Optional[int] = decoder_start_token_id UpperCAmelCase : Optional[Any] = pad_token_id UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
311
'''simple docstring''' from collections.abc import Generator from math import sin def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) != 32: raise ValueError("Input must be of length 32" ) UpperCAmelCase : Union[str, Any] = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:] UpperCAmelCase : List[str] = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = b"" for char in message: bit_string += format(__magic_name__ , "08b" ).encode("utf-8" ) UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def lowercase ( __magic_name__ ): '''simple docstring''' if len(__magic_name__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512] UpperCAmelCase : Tuple = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def lowercase ( __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) UpperCAmelCase : Any = format(__magic_name__ , "032b" ) UpperCAmelCase : int = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (a + b) % 2**32 def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = preprocess(__magic_name__ ) UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCAmelCase : List[str] = 0X67452301 UpperCAmelCase : Tuple = 0XEFCDAB89 UpperCAmelCase : List[Any] = 0X98BADCFE UpperCAmelCase : List[str] = 0X10325476 UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : List[Any] = ba UpperCAmelCase : Optional[Any] = ca UpperCAmelCase : Any = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : List[str] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : int = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 16 elif i <= 47: UpperCAmelCase : Any = b ^ c ^ d UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16 else: UpperCAmelCase : Dict = c ^ (b | 
not_aa(__magic_name__ )) UpperCAmelCase : Dict = (7 * i) % 16 UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCAmelCase : List[Any] = d UpperCAmelCase : Any = c UpperCAmelCase : Dict = b UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ ) UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
311
1
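# The pure-Python MD5 split across the two blobs above is easiest to validate
# against hashlib and the RFC 1321 test vectors. A standalone harness; swap
# hashlib.md5 for the module's own digest function to exercise the
# reimplementation:
import hashlib

vectors = {
    b"": "d41d8cd98f00b204e9800998ecf8427e",
    b"a": "0cc175b9c0f1b6a831c399e269772661",
    b"abc": "900150983cd24fb0d6963f7d28e17f72",
}
for message, expected in vectors.items():
    assert hashlib.md5(message).hexdigest() == expected
print("all MD5 test vectors match")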
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000).

    1 Jan 1900 was a Monday and 1900 was not a leap year, so 1 Jan 1901 was a
    Tuesday; `day` starts at 6, the date of the first Sunday of January 1901,
    and advances a week at a time."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
311
'''simple docstring''' a : List[str] = "0.21.0" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
311
1
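# The calendar walk in the Sundays solution above can be cross-checked with
# datetime, which already encodes the Gregorian leap rules:
import datetime

count = sum(
    datetime.date(year, month, 1).weekday() == 6  # Monday == 0, Sunday == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
print(count)  # 171 Sundays on the first of the month, Jan 1901 - Dec 2000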
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number.

    Ugly numbers are positive integers whose only prime factors are 2, 3 or 5;
    the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
    """
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_2 = ugly_nums[ia] * 2
    next_3 = ugly_nums[ib] * 3
    next_5 = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # advance every pointer that produced next_num, to skip duplicates
        if next_num == next_2:
            ia += 1
            next_2 = ugly_nums[ia] * 2
        if next_num == next_3:
            ib += 1
            next_3 = ugly_nums[ib] * 3
        if next_num == next_5:
            ic += 1
            next_5 = ugly_nums[ic] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
311
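# Usage check for the ugly-number generator above: the sequence begins
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the tenth ugly number is 12. The same
# three-pointer merge, returning the whole prefix:
def first_ugly_numbers(count: int) -> list:
    nums = [1]
    i2 = i3 = i5 = 0
    while len(nums) < count:
        nxt = min(nums[i2] * 2, nums[i3] * 3, nums[i5] * 5)
        nums.append(nxt)
        # advance every pointer that produced nxt (deduplicates e.g. 2*3 == 3*2)
        i2 += nums[i2] * 2 == nxt
        i3 += nums[i3] * 3 == nxt
        i5 += nums[i5] * 5 == nxt
    return nums

assert first_ugly_numbers(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]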
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger(__name__) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase : Tuple = 192 UpperCAmelCase : str = 768 UpperCAmelCase : List[Any] = 12 UpperCAmelCase : List[Any] = 3 UpperCAmelCase : List[Any] = [800, 1333] UpperCAmelCase : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Union[str, Any] = 330 UpperCAmelCase : Union[str, Any] = 14 UpperCAmelCase : Any = 6 UpperCAmelCase : int = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase : Union[str, Any] = 384 UpperCAmelCase : Dict = 1536 UpperCAmelCase : str = 12 UpperCAmelCase : List[str] = 6 elif "yolos_b" in yolos_name: UpperCAmelCase : int = [800, 1344] UpperCAmelCase : Optional[int] = 91 UpperCAmelCase : int = "huggingface/label-files" UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json" UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()} UpperCAmelCase : str = idalabel UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :] UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :] def lowercase ( __magic_name__ ): '''simple docstring''' if "backbone" in name: UpperCAmelCase : int = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase : Any = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: 
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ ) if "qkv" in key: UpperCAmelCase : str = key.split("." ) UpperCAmelCase : List[Any] = int(key_split[2] ) UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase : Optional[int] = val[:dim, :] UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase : Any = val[-dim:, :] else: UpperCAmelCase : Tuple = val[:dim] UpperCAmelCase : List[str] = val[dim : dim * 2] UpperCAmelCase : Any = val[-dim:] else: UpperCAmelCase : Union[str, Any] = val return orig_state_dict def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ) return im @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ): '''simple docstring''' UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ ) # load original state_dict UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ ) model.eval() UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ ) model.load_state_dict(__magic_name__ ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase : List[str] = model(**__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None if yolos_name == "yolos_ti": UpperCAmelCase : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) UpperCAmelCase : Tuple = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase : Union[str, Any] = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) UpperCAmelCase : List[str] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase : List[str] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) UpperCAmelCase : Dict = torch.tensor( [[0.7_6_1_4, 
0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase : Dict = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) UpperCAmelCase : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": UpperCAmelCase : str = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__magic_name__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__magic_name__ ) if push_to_hub: UpperCAmelCase : int = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase : Tuple = model_mapping[yolos_name] image_processor.push_to_hub(__magic_name__ , organization="hustvl" ) model.push_to_hub(__magic_name__ , organization="hustvl" ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--yolos_name", default="yolos_s_200_pre", type=str, help=( "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre'," " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'." ), ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a : str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
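The conversion script above ends by writing a model and an image processor to --pytorch_dump_folder_path. A minimal sketch of loading that output back and running detection, assuming a hypothetical dump folder ./yolos-converted:

import torch
import requests
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

# hypothetical local path: whatever was passed as --pytorch_dump_folder_path
processor = YolosImageProcessor.from_pretrained("./yolos-converted")
model = YolosForObjectDetection.from_pretrained("./yolos-converted")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# class logits per detection token, and normalized (cx, cy, w, h) boxes
print(outputs.logits.shape, outputs.pred_boxes.shape)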
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = (UniPCMultistepScheduler,) SCREAMING_SNAKE_CASE__ : Tuple = (("num_inference_steps", 25),) def A_ ( self , **snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**snake_case ) return config def A_ ( self , snake_case=0 , **snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , snake_case ) UpperCAmelCase : List[Any] = self.dummy_sample UpperCAmelCase : Dict = 0.1 * sample UpperCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase : List[str] = self.get_scheduler_config(**snake_case ) UpperCAmelCase : Union[str, Any] = scheduler_class(**snake_case ) scheduler.set_timesteps(snake_case ) # copy over dummy past residuals UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case ) UpperCAmelCase : Tuple = scheduler_class.from_pretrained(snake_case ) new_scheduler.set_timesteps(snake_case ) # copy over dummy past residuals UpperCAmelCase : Any = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase , UpperCAmelCase : Any = sample, sample for t in range(snake_case , time_step + scheduler.config.solver_order + 1 ): UpperCAmelCase : Optional[int] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample UpperCAmelCase : List[str] = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def A_ ( self , snake_case=0 , **snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase : int = kwargs.pop("num_inference_steps" , snake_case ) UpperCAmelCase : Any = self.dummy_sample UpperCAmelCase : str = 0.1 * sample UpperCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: UpperCAmelCase : int = self.get_scheduler_config() UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case ) scheduler.set_timesteps(snake_case ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case ) UpperCAmelCase : int = scheduler_class.from_pretrained(snake_case ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase : int = dummy_past_residuals[: new_scheduler.config.solver_order] UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample UpperCAmelCase : str = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler 
outputs are not identical" def A_ ( self , snake_case=None , **snake_case ): '''simple docstring''' if scheduler is None: UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase : str = self.get_scheduler_config(**snake_case ) UpperCAmelCase : List[str] = scheduler_class(**snake_case ) UpperCAmelCase : Optional[int] = self.scheduler_classes[0] UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**snake_case ) UpperCAmelCase : Tuple = scheduler_class(**snake_case ) UpperCAmelCase : Optional[int] = 1_0 UpperCAmelCase : Dict = self.dummy_model() UpperCAmelCase : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(snake_case ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : List[str] = model(snake_case , snake_case ) UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample return sample def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = dict(self.forward_default_kwargs ) UpperCAmelCase : List[str] = kwargs.pop("num_inference_steps" , snake_case ) for scheduler_class in self.scheduler_classes: UpperCAmelCase : Tuple = self.get_scheduler_config() UpperCAmelCase : List[Any] = scheduler_class(**snake_case ) UpperCAmelCase : Optional[int] = self.dummy_sample UpperCAmelCase : int = 0.1 * sample if num_inference_steps is not None and hasattr(snake_case , "set_timesteps" ): scheduler.set_timesteps(snake_case ) elif num_inference_steps is not None and not hasattr(snake_case , "set_timesteps" ): UpperCAmelCase : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10] UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] UpperCAmelCase : List[str] = scheduler.timesteps[5] UpperCAmelCase : str = scheduler.timesteps[6] UpperCAmelCase : Dict = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample UpperCAmelCase : Optional[int] = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() ) UpperCAmelCase : Tuple = self.full_loop(scheduler=snake_case ) UpperCAmelCase : Dict = torch.mean(torch.abs(snake_case ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 UpperCAmelCase : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) UpperCAmelCase : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : int = UniPCMultistepScheduler.from_config(scheduler.config ) UpperCAmelCase : List[str] = self.full_loop(scheduler=snake_case ) UpperCAmelCase : int = torch.mean(torch.abs(snake_case ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def A_ ( self ): '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=snake_case ) def A_ ( self ): '''simple docstring''' self.check_over_configs(thresholding=snake_case ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , solver_order=snake_case , 
solver_type=snake_case , ) def A_ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case ) def A_ ( self ): '''simple docstring''' for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , ) UpperCAmelCase : List[str] = self.full_loop( solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , ) assert not torch.isnan(snake_case ).any(), "Samples have nan numbers" def A_ ( self ): '''simple docstring''' self.check_over_configs(lower_order_final=snake_case ) self.check_over_configs(lower_order_final=snake_case ) def A_ ( self ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=snake_case , time_step=0 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.full_loop() UpperCAmelCase : Dict = torch.mean(torch.abs(snake_case ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase : Dict = torch.mean(torch.abs(snake_case ) ) assert abs(result_mean.item() - 0.1014 ) < 1e-3 def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.scheduler_classes[0] UpperCAmelCase : Any = self.get_scheduler_config(thresholding=snake_case , dynamic_thresholding_ratio=0 ) UpperCAmelCase : Any = scheduler_class(**snake_case ) UpperCAmelCase : List[Any] = 1_0 UpperCAmelCase : Tuple = self.dummy_model() UpperCAmelCase : List[Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(snake_case ) for i, t in enumerate(scheduler.timesteps ): UpperCAmelCase : Union[str, Any] = model(snake_case , snake_case ) UpperCAmelCase : Optional[int] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample assert sample.dtype == torch.floataa def A_ ( self , **snake_case ): '''simple docstring''' for scheduler_class in self.scheduler_classes: UpperCAmelCase : Any = self.get_scheduler_config(**snake_case ) UpperCAmelCase : Optional[int] = scheduler_class(**snake_case ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
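These tests all revolve around the same loop: build a UniPCMultistepScheduler, call set_timesteps, then call step() once per timestep. A minimal sketch of that pattern with a stand-in model (the 0.1 * sample residual mirrors the dummy output used in the tests above):

import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample  # stand-in for a real denoising network model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])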
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits for vocabularies smaller than 2**16
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
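Reading the dump back is a single pickle.load. A short sketch, assuming the default --tokenizer_name so the file is named data/dump.bert-base-uncased.pickle (hypothetical path):

import pickle

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
    sequences = pickle.load(handle)
# one integer array of token ids per input line, shuffled
print(len(sequences), sequences[0].dtype)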
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> int:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns the in-bounds, obstacle-free neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        """Retrace the path from `node` back through its parents to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a : Tuple = ["gpt2"] a : Dict = "gpt2" if is_tf_available(): class UpperCamelCase__ ( tf.Module ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__() UpperCAmelCase : Tuple = tokenizer UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case ) UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case ) UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor() UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"] return outputs @require_tf @require_keras_nlp class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)] UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase : Tuple = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A_ ( self ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" ) UpperCAmelCase : Any = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors UpperCAmelCase : Dict = python_outputs[key].numpy() UpperCAmelCase : List[str] = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Optional[Any] = tf.function(snake_case ) for test_inputs in self.test_sentences: UpperCAmelCase : List[str] = tf.constant(snake_case ) UpperCAmelCase : Dict = compiled_tokenizer(snake_case ) UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : int = ModelToSave(tokenizer=snake_case ) 
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model" tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} ) UpperCAmelCase : int = tf.saved_model.load(snake_case ) UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config() UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case ) UpperCAmelCase : Tuple = model_from_config(snake_case ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A_ ( self ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run UpperCAmelCase : List[str] = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] ) UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case ) UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1] assert out_length == max_length
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Removes duplicate entries from the model doc table of content and sorts it alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
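A toy illustration of what clean_model_doc_toc does to a section list containing a duplicate local key (the entries below are made up):

toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate with the same title, so it is merged
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]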
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disables gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
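A quick sketch exercising the helpers above on a tiny torch module (the layer is arbitrary, just something with parameters to freeze):

import torch.nn as nn

layer = nn.Linear(4, 2)
freeze_module(layer)
print(all(not p.requires_grad for p in layer.parameters()))  # True
print(get_device(), get_timestamp())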
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
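debug_launcher spawns a function across simulated processes on CPU, which is what both tests above rely on. A minimal sketch with a hypothetical worker function (num_processes=2 is the launcher's documented default):

from accelerate import Accelerator, debug_launcher


def worker():
    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")


debug_launcher(worker, num_processes=2)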
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def 
lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
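Each rank writes JSON records of the form {"pred": ..., "id": ...}, and combine_partial_results above merges them back into id order. A self-contained toy illustration of that merge (the data is made up):

rank0 = [{"pred": "a summary", "id": 0}, {"pred": "another", "id": 2}]
rank1 = [{"pred": "from rank 1", "id": 1}]

records = []
for partial in (rank0, rank1):
    records.extend(partial)
records = sorted(records, key=lambda x: x["id"])
preds = [x["pred"] for x in records]
print(preds)  # ['a summary', 'from rank 1', 'another'], restored to dataset order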
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple of ``x`` and ``y``."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all of the numbers from 1 to ``n``."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
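A worked check against the classic Project Euler statement: 2520 is the smallest number evenly divisible by all of 1 through 10, and chaining lcm over 1..10 reproduces it:

print(solution(10))  # 2520
print(2520 % 7, 2520 % 8, 2520 % 9)  # 0 0 0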
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Optional[Any] = ["model.decoder.embed_positions.weights"] def lowercase ( __magic_name__ ): '''simple docstring''' if "emb" in name: UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase : int = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = list(state_dict.keys() ) UpperCAmelCase : List[Any] = {} for key in keys: UpperCAmelCase : Any = state_dict.pop(__magic_name__ ) UpperCAmelCase : str = rename_keys(__magic_name__ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Optional[int] = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : str = val else: UpperCAmelCase : int = val return state_dict, enc_dec_proj_state_dict def lowercase ( __magic_name__ ): '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1024 UpperCAmelCase : Tuple = 24 UpperCAmelCase : Union[str, Any] = 16 elif checkpoint == "medium": UpperCAmelCase : List[Any] = 1536 UpperCAmelCase : Optional[Any] = 48 UpperCAmelCase : List[str] = 24 elif checkpoint == "large": UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : str = 48 UpperCAmelCase : Optional[Any] = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) UpperCAmelCase : Tuple = MusicgenDecoderConfig( hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , ) return config @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ ) UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ ) UpperCAmelCase : Dict = fairseq_model.lm.state_dict() UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict( __magic_name__ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__magic_name__ ) if len(__magic_name__ ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(__magic_name__ ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__magic_name__ ) # check we can do a forward pass UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) # set the appropriate bos/pad token ids UpperCAmelCase : List[Any] = 2048 UpperCAmelCase : Tuple = 2048 # set other default generation config params UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase : str = True UpperCAmelCase : Tuple = 3.0 if pytorch_dump_folder is not None: Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(__magic_name__ ) processor.save_pretrained(__magic_name__ ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(__magic_name__ ) processor.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) a : int = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
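A hedged sketch of using a converted checkpoint for generation. The folder path is hypothetical (whatever was passed as --pytorch_dump_folder), and the generation settings mirror the defaults the script writes above (sampling on, guidance scale 3.0):

from transformers import AutoProcessor, MusicgenForConditionalGeneration

# hypothetical local path produced by the conversion script
processor = AutoProcessor.from_pretrained("./musicgen-small-converted")
model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small-converted")

inputs = processor(text=["80s synth-pop with a driving bassline"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
print(audio_values.shape)  # (batch, num_channels, sequence_length)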
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of ``2 ** power``."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(input().strip())))
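A quick sanity check: 2**15 = 32768, whose digits sum to 3 + 2 + 7 + 6 + 8 = 26; the default call computes the digit sum of 2**1000:

assert solution(15) == 26
print(solution())  # 1366, the digit sum of 2**1000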
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
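A round-trip sketch with a small 2x2 key whose determinant, 7, is co-prime with 36. Note that process_text pads "HELLO" to an even length with its last character before encryption, so the padded plaintext is what comes back:

import numpy

key = numpy.array([[2, 5], [1, 6]])
hc = HillCipher(key)
cipher = hc.encrypt("HELLO")
print(cipher)             # '85FF00'
print(hc.decrypt(cipher))  # 'HELLOO', the padded plaintext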
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 
2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 
3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
1
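For reference, a minimal sketch of driving the zero-shot object detection pipeline exercised by the tests above; the tiny test checkpoint, the candidate labels, and the 0.64 threshold are taken from the tests themselves, and the fixture path assumes the transformers repository layout.

from transformers import pipeline

detector = pipeline(
    "zero-shot-object-detection",
    model="hf-internal-testing/tiny-random-owlvit-object-detection",
)
outputs = detector(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.64,
)
for detection in outputs:
    # each entry is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(detection["label"], round(detection["score"], 4), detection["box"])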
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case ): '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM UpperCAmelCase : Any = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=snake_case , scheduler=snake_case ) @torch.no_grad() def __call__( self , snake_case = 1 , snake_case = None , snake_case = 0.0 , snake_case = 5_0 , snake_case = None , snake_case = "pil" , snake_case = True , ): '''simple docstring''' if isinstance(self.unet.config.sample_size , snake_case ): UpperCAmelCase : Dict = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: UpperCAmelCase : List[Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(snake_case , snake_case ) and len(snake_case ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(snake_case )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) UpperCAmelCase : Union[str, Any] = randn_tensor(snake_case , generator=snake_case , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(snake_case ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase : List[Any] = self.unet(snake_case , snake_case ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCAmelCase : Union[str, Any] = self.scheduler.step( snake_case , snake_case , snake_case , eta=snake_case , use_clipped_model_output=snake_case , generator=snake_case ).prev_sample UpperCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase : Dict = self.numpy_to_pil(snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case )
311
"""Two's complement representation of a negative integer."""


def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string.

    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
311
1
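A hedged usage sketch for the DDIM image pipeline defined above; the checkpoint name is an illustrative assumption (any unconditional DDPM/DDIM-compatible UNet checkpoint should work), not something this file pins down.

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
result = pipe(batch_size=1, num_inference_steps=50, eta=0.0, output_type="pil")
result.images[0].save("ddim_sample.png")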
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = inspect.getfile(accelerate.test_utils ) UpperCAmelCase : Union[str, Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 UpperCAmelCase : List[Any] = test_metrics @require_cpu def A_ ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def A_ ( self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def A_ ( self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def A_ ( self ): '''simple docstring''' print(f"Found {torch.cuda.device_count()} devices." ) UpperCAmelCase : Optional[int] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case , env=os.environ.copy() )
311
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split a : int = datasets.load_iris() a : Union[str, Any] = np.array(data["data"]) a : Optional[Any] = np.array(data["target"]) a : List[Any] = data["target_names"] a , a , a , a : Dict = train_test_split(X, y) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return np.linalg.norm(np.array(__magic_name__ ) - np.array(__magic_name__ ) ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=5 ): '''simple docstring''' UpperCAmelCase : int = zip(__magic_name__ , __magic_name__ ) # List of distances of all points from the point to be classified UpperCAmelCase : List[Any] = [] for data_point in data: UpperCAmelCase : List[str] = euclidean_distance(data_point[0] , __magic_name__ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(__magic_name__ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase : List[str] = Counter(__magic_name__ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
311
1
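Since the k-nearest-neighbours classifier above is hard to read in this form, here is a runnable reconstruction with the variable names inferred from how they are used; the logic is unchanged.

from collections import Counter

import numpy as np


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # distance of every training point from the point to be classified
    distances = [(euclidean_distance(sample, point), label) for sample, label in data]
    # majority vote among the k nearest neighbours
    votes = [label for _, label in sorted(distances)[:k]]
    return classes[Counter(votes).most_common(1)[0][0]]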
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch a : List[str] = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["pixel_values"] def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = None , snake_case = None , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 2_5_6} UpperCAmelCase : Optional[Any] = get_size_dict(snake_case , default_to_square=snake_case ) UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} UpperCAmelCase : Union[str, Any] = get_size_dict(snake_case , param_name="crop_size" ) UpperCAmelCase : Dict = do_resize UpperCAmelCase : Optional[Any] = size UpperCAmelCase : List[str] = resample UpperCAmelCase : Any = do_center_crop UpperCAmelCase : List[Any] = crop_size UpperCAmelCase : Any = do_rescale UpperCAmelCase : Optional[Any] = rescale_factor UpperCAmelCase : Optional[int] = do_normalize UpperCAmelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def A_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : Any = get_size_dict(snake_case , default_to_square=snake_case ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase : List[Any] = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case ) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : List[Any] = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}" ) return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case ): '''simple docstring''' return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ): '''simple docstring''' UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : Union[str, Any] = size if size is not None else self.size UpperCAmelCase : Optional[int] = get_size_dict(snake_case , default_to_square=snake_case ) UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample UpperCAmelCase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase : List[str] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase : Optional[Any] = get_size_dict(snake_case , param_name="crop_size" ) UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : List[str] = image_std if image_std is not None else self.image_std UpperCAmelCase : Any = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCAmelCase : List[str] = [to_numpy_array(snake_case ) for image in images] if do_resize: UpperCAmelCase : Tuple = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images] if do_center_crop: UpperCAmelCase : List[str] = [self.center_crop(image=snake_case , size=snake_case ) for image in images] if do_rescale: UpperCAmelCase : Dict = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: UpperCAmelCase : int = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(snake_case , snake_case ) for image in images] UpperCAmelCase : Optional[Any] = {"pixel_values": images} return BatchFeature(data=snake_case , tensor_type=snake_case ) def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : str = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(snake_case ) != len(snake_case ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(snake_case ): UpperCAmelCase : List[str] = target_sizes.numpy() UpperCAmelCase : Optional[int] = [] for idx in range(len(snake_case ) ): UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case ) UpperCAmelCase : Union[str, Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(snake_case ) else: UpperCAmelCase : Any = logits.argmax(dim=1 ) UpperCAmelCase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
311
"""Bit-manipulation check for powers of two."""


def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (1, 2, 4, 8, ...).

    >>> [n for n in range(10) if is_power_of_two(n)]
    [1, 2, 4, 8]
    """
    if number < 0:
        raise ValueError("number must not be negative")
    # `number & (number - 1)` clears the lowest set bit, so it is zero only for
    # powers of two; 0 must be excluded explicitly because 0 & -1 == 0 as well.
    return number != 0 and number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
311
1
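A minimal sketch of how an image processor with the resize/center-crop/rescale/normalize knobs defined above is typically driven; the checkpoint name is an assumption for illustration, and the fixture path again assumes the transformers repository layout.

from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])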
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : Tuple = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' UpperCAmelCase : List[str] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) UpperCAmelCase : Any = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCAmelCase : List[Any] = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case ) UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase : str = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , ) for _ in range(1_0_0_0 ): UpperCAmelCase : str = criterion(snake_case , snake_case ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE__ : Optional[int] = 10 def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ): '''simple docstring''' self.assertEqual(len(snake_case ) , len(snake_case ) ) for a, b in zip(snake_case , snake_case ): self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase : int = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase , UpperCAmelCase : Any = data UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps ) self.assertListAlmostEqual( snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps ) self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" ) class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = fn def __call__( self , *snake_case , **snake_case ): '''simple docstring''' return self.fn(*snake_case , **snake_case ) @classmethod def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = list(map(self , scheduler.lr_lambdas ) )
311
1
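The schedule values asserted in the tests above can be reproduced directly; this sketch mirrors the linear-warmup case with the same hyperparameters (2 warmup steps, 10 training steps, peak learning rate 10.0).

from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

model = nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]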
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict a : Any = namedtuple( "_TestCommandArgs", [ "dataset", "name", "cache_dir", "data_dir", "all_configs", "save_infos", "ignore_verifications", "force_redownload", "clear_cache", ], defaults=[None, None, None, False, False, False, False, False], ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return (abs(source - target ) / target) < 0.0_1 @pytest.mark.integration def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = _TestCommandArgs(dataset=__magic_name__ , all_configs=__magic_name__ , save_infos=__magic_name__ ) UpperCAmelCase : Any = TestCommand(*__magic_name__ ) test_command.run() UpperCAmelCase : List[Any] = os.path.join(__magic_name__ , "README.md" ) assert os.path.exists(__magic_name__ ) UpperCAmelCase : Dict = DatasetInfosDict.from_directory(__magic_name__ ) UpperCAmelCase : List[str] = DatasetInfosDict( { "default": DatasetInfo( features=Features( { "tokens": Sequence(Value("string" ) ), "ner_tags": Sequence( ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ), "langs": Sequence(Value("string" ) ), "spans": Sequence(Value("string" ) ), } ) , splits=[ { "name": "train", "num_bytes": 235_1563, "num_examples": 1_0000, }, { "name": "validation", "num_bytes": 23_8418, "num_examples": 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: UpperCAmelCase , UpperCAmelCase : Dict = getattr(dataset_infos["default"] , __magic_name__ ), getattr(expected_dataset_infos["default"] , __magic_name__ ) if key == "num_bytes": assert is_apercent_close(__magic_name__ , __magic_name__ ) elif key == "splits": assert list(__magic_name__ ) == list(__magic_name__ ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
311
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Tuple = "T5Config" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ ) UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ ) UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ ) return shifted_input_ids class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : Dict = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = "mt5" SCREAMING_SNAKE_CASE__ : str = MTaConfig
311
1
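A standalone sketch of the `shift_tokens_right` helper defined in the MT5 module above, with readable names restored from its jnp operations; -100 marks label positions to ignore.

import jax.numpy as jnp


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])
    shifted = shifted.at[:, 0].set(decoder_start_token_id)
    # masked (-100) label positions become the pad token
    return jnp.where(shifted == -100, pad_token_id, shifted)


print(shift_tokens_right(jnp.array([[5, 6, -100]]), pad_token_id=0, decoder_start_token_id=1))
# [[1 5 6]]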
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ["speech"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["speech"] ) class UpperCamelCase__ ( metaclass=lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["speech"] def __init__( self , *snake_case , **snake_case ): '''simple docstring''' requires_backends(self , ["speech"] )
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + 
measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
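The iterative branch of the WER metric above reduces to the following; the predictions, references, and the expected 0.5 come from the metric's own docstring, and the `jiwer` package is assumed to be installed.

from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect = total = 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # 0.5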
"""Heap's algorithm for generating all permutations of a list."""


def heaps(arr: list) -> list:
    """Return all permutations of `arr` as a list of tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
311
"""Search for n consecutive integers that each have exactly n distinct prime factors."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of `n` by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True if all elements of the iterable are equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function, then append
        # the target number so `equality` only passes when every count equals n.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
311
1
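Two quick sanity checks for the helpers above, assuming both functions are importable into one scope; the expected answer for `solution(2)` follows from 14 = 2 * 7 and 15 = 3 * 5 being consecutive integers with two distinct prime factors each.

assert sorted(heaps([1, 2, 3])) == [
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
]
assert solution(2) == 14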
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : List[Any] = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "vivit" def __init__( self , snake_case=2_2_4 , snake_case=3_2 , snake_case=[2, 1_6, 1_6] , snake_case=3 , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu_fast" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1e-06 , snake_case=True , **snake_case , ): '''simple docstring''' UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Any = num_hidden_layers UpperCAmelCase : Tuple = num_attention_heads UpperCAmelCase : Optional[int] = intermediate_size UpperCAmelCase : Dict = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Dict = layer_norm_eps UpperCAmelCase : Optional[int] = image_size UpperCAmelCase : List[str] = num_frames UpperCAmelCase : Union[str, Any] = tubelet_size UpperCAmelCase : Any = num_channels UpperCAmelCase : Dict = qkv_bias super().__init__(**snake_case )
311
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a : Union[str, Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
311
1
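An illustrative instantiation of the ViViT configuration class defined above; the printed defaults come straight from its __init__ signature.

from transformers import VivitConfig

config = VivitConfig()
print(config.model_type)    # "vivit"
print(config.num_frames)    # 32
print(config.tubelet_size)  # [2, 16, 16]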
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : List[Any] = { "caidas/swin2sr-classicalsr-x2-64": ( "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" ), } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = "swin2sr" SCREAMING_SNAKE_CASE__ : List[Any] = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , snake_case=6_4 , snake_case=1 , snake_case=3 , snake_case=1_8_0 , snake_case=[6, 6, 6, 6, 6, 6] , snake_case=[6, 6, 6, 6, 6, 6] , snake_case=8 , snake_case=2.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1e-5 , snake_case=2 , snake_case=1.0 , snake_case="1conv" , snake_case="pixelshuffle" , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : Optional[Any] = image_size UpperCAmelCase : List[Any] = patch_size UpperCAmelCase : List[str] = num_channels UpperCAmelCase : Any = embed_dim UpperCAmelCase : List[str] = depths UpperCAmelCase : Any = len(snake_case ) UpperCAmelCase : int = num_heads UpperCAmelCase : int = window_size UpperCAmelCase : Dict = mlp_ratio UpperCAmelCase : Optional[Any] = qkv_bias UpperCAmelCase : Optional[int] = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Optional[int] = drop_path_rate UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Dict = use_absolute_embeddings UpperCAmelCase : Dict = layer_norm_eps UpperCAmelCase : Dict = initializer_range UpperCAmelCase : int = upscale UpperCAmelCase : Any = img_range UpperCAmelCase : Optional[Any] = resi_connection UpperCAmelCase : Tuple = upsampler
311
'''simple docstring''' # Lint as: python3 import itertools import os import re a : Tuple = re.compile(R"([A-Z]+)([A-Z][a-z])") a : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])") a : str = re.compile(R"(?<!_)_(?!_)") a : List[Any] = re.compile(R"(_{2,})") a : List[Any] = R"^\w+(\.\w+)*$" a : Dict = R"<>:/\|?*" def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = _uppercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) UpperCAmelCase : List[str] = _lowercase_uppercase_re.sub(R"\1_\2" , __magic_name__ ) return name.lower() def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Any = _single_underscore_re.split(__magic_name__ ) UpperCAmelCase : Union[str, Any] = [_multiple_underscores_re.split(__magic_name__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(__magic_name__ ) if n != "" ) def lowercase ( __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if os.path.basename(__magic_name__ ) != name: raise ValueError(F"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , __magic_name__ ): raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." ) return F"{filename_prefix_for_name(__magic_name__ )}-{split}" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) if filetype_suffix: prefix += F".{filetype_suffix}" UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) return F"{filepath}*" def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : List[str] = filename_prefix_for_split(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) if shard_lengths: UpperCAmelCase : Tuple = len(__magic_name__ ) UpperCAmelCase : Optional[int] = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__magic_name__ )] if filetype_suffix: UpperCAmelCase : Optional[int] = [filename + F".{filetype_suffix}" for filename in filenames] return filenames else: UpperCAmelCase : int = prefix if filetype_suffix: filename += F".{filetype_suffix}" return [filename]
311
1
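A readable reconstruction of the CamelCase-to-snake_case helper from the naming module above; the regexes are copied verbatim and the behaviour is unchanged.

import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")


def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


print(camelcase_to_snakecase("SnakeCaseName"))  # snake_case_name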
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Optional[int] = { "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = "vit_msn" def __init__( self , snake_case=7_6_8 , snake_case=1_2 , snake_case=1_2 , snake_case=3_0_7_2 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1e-06 , snake_case=2_2_4 , snake_case=1_6 , snake_case=3 , snake_case=True , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : List[str] = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : int = hidden_act UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : Tuple = attention_probs_dropout_prob UpperCAmelCase : Any = initializer_range UpperCAmelCase : Dict = layer_norm_eps UpperCAmelCase : Dict = image_size UpperCAmelCase : int = patch_size UpperCAmelCase : Any = num_channels UpperCAmelCase : str = qkv_bias
311
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a : Optional[int] = _symbol_database.Default() a : Any = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) a : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: a : str = None a : Optional[Any] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a : str = 45 a : Any = 15_81 a : List[Any] = 15_17 a : Union[str, Any] = 15_70 a : Optional[Any] = 15_84 a : List[str] = 17_93 a : Optional[Any] = 17_95 a : Tuple = 19_16 a : Optional[Any] = 18_64 a : int = 19_05 a : Optional[Any] = 19_19 a : Union[str, Any] = 24_29 a : List[Any] = 22_08 a : Dict = 24_18 a : Optional[int] = 23_23 a : str = 24_07 # @@protoc_insertion_point(module_scope)
311
1
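For completeness, a hedged sketch of building a randomly initialised model from the ViT-MSN configuration defined above; this assumes a transformers version that ships `ViTMSNModel`.

from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig()  # defaults: hidden_size=768, 12 layers, 224x224 images, patch 16
model = ViTMSNModel(config)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")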
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a : Optional[int] = logging.get_logger(__name__) a : Optional[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } a : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = {} with open(__magic_name__ , "r" ) as file: for line_number, line in enumerate(__magic_name__ ): UpperCAmelCase : Optional[Any] = line.strip() if line: UpperCAmelCase : Optional[Any] = line.split() UpperCAmelCase : Any = line_number UpperCAmelCase : Optional[Any] = words[0] UpperCAmelCase : List[Any] = value return result def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' for attribute in key.split("." ): UpperCAmelCase : str = getattr(__magic_name__ , __magic_name__ ) UpperCAmelCase : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__magic_name__ ): UpperCAmelCase : Dict = PARAM_MAPPING[full_name.split("." )[-1]] UpperCAmelCase : str = "param" if weight_type is not None and weight_type != "param": UpperCAmelCase : int = getattr(__magic_name__ , __magic_name__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase : List[str] = hf_pointer for attribute in hf_param_name.split("." ): UpperCAmelCase : Optional[Any] = getattr(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = shape_pointer.shape # let's reduce dimension UpperCAmelCase : Optional[int] = value[0] else: UpperCAmelCase : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": UpperCAmelCase : List[str] = value elif weight_type == "weight_g": UpperCAmelCase : List[str] = value elif weight_type == "weight_v": UpperCAmelCase : Optional[int] = value elif weight_type == "bias": UpperCAmelCase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split("." 
):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
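# A minimal sketch of what typically follows a conversion run like the one above:
# loading the exported folder back through the transformers auto/model classes.
# This is not part of the conversion script; "path/to/dump" is a placeholder path.
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("path/to/dump")
model = Wav2Vec2ForCTC.from_pretrained("path/to/dump")

# One second of silence at 16 kHz stands in for real audio.
inputs = processor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
transcription = processor.batch_decode(torch.argmax(logits, dim=-1))
print(transcription)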
311
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
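# A minimal driver sketch for the functions above. Judging from generate_neighbours
# and generate_first_solution, the input file is an edge list with one
# "node node distance" triple per line, and the start node is the file's first
# character (so single-character node names are assumed). The graph below is
# made up for illustration.
edges = """a b 20
a c 18
a d 22
b c 10
b d 11
c d 12"""

with open("tabu_test_data.txt", "w") as f:
    f.write(edges)

neighbours = generate_neighbours("tabu_test_data.txt")
first_solution, first_distance = generate_first_solution("tabu_test_data.txt", neighbours)
best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, iters=5, size=3)
print(best_solution, best_cost)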
311
1
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
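# The registration pattern exercised by these tests can be used directly in user
# code. A minimal sketch: "my-model" and MyModelConfig are illustrative names,
# not real models in the Transformers library.
from transformers import AutoConfig, PretrainedConfig


class MyModelConfig(PretrainedConfig):
    model_type = "my-model"

    def __init__(self, hidden_size=64, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)


AutoConfig.register("my-model", MyModelConfig)
# Once registered, the type resolves through the auto-API like any built-in config.
config = AutoConfig.for_model("my-model", hidden_size=128)
assert isinstance(config, MyModelConfig) and config.hidden_size == 128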
311
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
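# A quick sanity check for the implementation above, assuming the functions are
# in scope: the digest of the empty message is a well-known MD5 constant, and
# any input can be cross-checked against the standard library.
import hashlib

assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"

msg = b"The quick brown fox jumps over the lazy dog"
assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")
print("md5_me matches hashlib.md5")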
311
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
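# Outside the test harness, the pipeline under test is driven the same way.
# A minimal inference sketch: assumes a CUDA GPU and downloads the
# BAAI/AltDiffusion weights from the Hub; the prompt and filename are illustrative.
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger", generator=generator, num_inference_steps=20
).images[0]
image.save("squirrel.png")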
311
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
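# The core entry point re-exported by this __init__ is the Accelerator class.
# A minimal training-loop sketch; the model, optimizer, and dataloader are toy
# placeholders, not part of the package.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
dataset = torch.utils.data.TensorDataset(torch.randn(32, 10), torch.randn(32, 1))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8)

# prepare() wraps the objects for the current device / distributed setup.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()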
311
1