Dataset schema (five columns per row):

    code                     string   length 86 – 54.5k
    code_codestyle           int64    0 – 371
    style_context            string   length 87 – 49.2k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1

The rows below list these five fields in this order.
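As a minimal sketch, rows with this schema could be read with the Hugging Face `datasets` library; the dataset path below is a hypothetical placeholder, since the dump does not name its source dataset.

# Minimal sketch, assuming the rows come from a Hugging Face dataset with the
# schema above. "user/code-style-pairs" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path
row = ds[0]
print(len(row["code"]), row["code_codestyle"])  # code text and its style id
print(len(row["style_context"]), row["style_context_codestyle"])
print(row["label"])  # binary label; its exact meaning is not documented in this dump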
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[Any] = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class a ( __snake_case ): SCREAMING_SNAKE_CASE : Any = 'gptj' SCREAMING_SNAKE_CASE : List[str] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any=50400 , __SCREAMING_SNAKE_CASE : List[str]=2048 , __SCREAMING_SNAKE_CASE : Dict=4096 , __SCREAMING_SNAKE_CASE : Union[str, Any]=28 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : int=64 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu_new" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Dict=1e-5 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[Any]=50256 , __SCREAMING_SNAKE_CASE : int=50256 , __SCREAMING_SNAKE_CASE : List[Any]=False , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Tuple: lowerCamelCase_ = vocab_size lowerCamelCase_ = n_positions lowerCamelCase_ = n_embd lowerCamelCase_ = n_layer lowerCamelCase_ = n_head lowerCamelCase_ = n_inner lowerCamelCase_ = rotary_dim lowerCamelCase_ = activation_function lowerCamelCase_ = resid_pdrop lowerCamelCase_ = embd_pdrop lowerCamelCase_ = attn_pdrop lowerCamelCase_ = layer_norm_epsilon lowerCamelCase_ = initializer_range lowerCamelCase_ = use_cache lowerCamelCase_ = bos_token_id lowerCamelCase_ = eos_token_id super().__init__( bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class a ( __snake_case ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple = "default" , __SCREAMING_SNAKE_CASE : Optional[Any] = None , __SCREAMING_SNAKE_CASE : Tuple = False , ) -> List[str]: super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config , 'pad_token_id' , __SCREAMING_SNAKE_CASE ): # TODO: how to do that better? 
lowerCamelCase_ = 0 @property def UpperCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]: lowerCamelCase_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='inputs' ) lowerCamelCase_ = {0: 'batch', 1: 'past_sequence + sequence'} else: lowerCamelCase_ = {0: 'batch', 1: 'sequence'} return common_inputs @property def UpperCamelCase ( self : Dict ) -> int: return self._config.n_layer @property def UpperCamelCase ( self : Union[str, Any] ) -> int: return self._config.n_head def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : Optional[int] = False , __SCREAMING_SNAKE_CASE : int = None , ) -> Mapping[str, Any]: lowerCamelCase_ = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() lowerCamelCase_ = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowerCamelCase_ , lowerCamelCase_ = common_inputs['input_ids'].shape # Not using the same length for past_key_values lowerCamelCase_ = seqlen + 2 lowerCamelCase_ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCamelCase_ = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] lowerCamelCase_ = common_inputs['attention_mask'] if self.use_past: lowerCamelCase_ = ordered_inputs['attention_mask'].dtype lowerCamelCase_ = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def UpperCamelCase ( self : str ) -> int: return 13
code_codestyle: 183
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } lowercase_ = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowerCAmelCase (__A , __A , __A , __A , __A , __A): """simple docstring""" for attribute in key.split('''.'''): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models _a = '''lm_head''' _a = getattr(__A , __A) if weight_type is not None: _a = getattr(__A , __A).shape else: _a = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value else: _a = value logger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''') def lowerCAmelCase (__A , __A , __A): """simple docstring""" _a = [] _a = fairseq_model.state_dict() _a = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): _a = False if "conv_layers" in name: load_conv_layer( __A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , ) _a = True else: for key, mapped_key in MAPPING.items(): _a = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]: _a = True if "*" in mapped_key: _a = name.split(__A)[0].split('''.''')[-2] _a = mapped_key.replace('''*''' , __A) if "weight_g" in name: _a = '''weight_g''' elif "weight_v" in name: _a = '''weight_v''' elif "bias" in name: _a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _a = '''weight''' else: _a = None set_recursively(__A , __A , __A , __A , __A , __A) continue if not is_used: unused_weights.append(__A) logger.warning(F'''Unused weights: {unused_weights}''') def lowerCAmelCase (__A , __A , __A , __A , __A): """simple docstring""" _a = full_name.split('''conv_layers.''')[-1] _a = name.split('''.''') _a = int(items[0]) _a = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _a = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _a = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) _a = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _a = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(__A) @torch.no_grad() def lowerCAmelCase (__A , __A , __A=None , __A=None , __A=True): """simple docstring""" if config_path is not None: _a = UniSpeechConfig.from_pretrained(__A) else: _a = UniSpeechConfig() if is_finetuned: if dict_path: _a = Dictionary.load_from_json(__A) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a = target_dict.pad_index _a = target_dict.bos_index _a = target_dict.eos_index _a = len(target_dict.symbols) _a = os.path.join(__A , '''vocab.json''') if not os.path.isdir(__A): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__A)) return os.makedirs(__A , exist_ok=__A) _a = target_dict.indices # fairseq has the <pad> and <s> switched _a = 42 _a = 43 with open(__A , '''w''' , encoding='''utf-8''') as vocab_handle: json.dump(__A , __A) _a = WavaVecaPhonemeCTCTokenizer( __A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__A , ) _a = True if config.feat_extract_norm == '''layer''' else False _a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , ) _a = WavaVecaProcessor(feature_extractor=__A , tokenizer=__A) processor.save_pretrained(__A) _a = UniSpeechForCTC(__A) else: _a = UniSpeechForPreTraining(__A) if is_finetuned: _a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1]), '''w2v_path''': checkpoint_path}) else: _a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) _a = model[0].eval() recursively_load_weights(__A , __A , __A) hf_unispeech.save_pretrained(__A) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowercase_ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
style_context_codestyle: 211
label: 0
# Project Euler Problem 2: sum of the even-valued Fibonacci terms not exceeding four million.


def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")  # e.g. solution(10) == 2 + 8 == 10
code_codestyle: 37
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
style_context_codestyle: 37
label: 1
# Raise base to the power of exponent using recursion.


def power(base: int, exponent: int) -> int:
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
code_codestyle: 112
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    # Project a 3D point onto a 2D plane with a simple perspective projection.
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    # Rotate a 3D point around the given axis ("x", "y" or "z") by `angle`.
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
style_context_codestyle: 232
label: 0
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowerCamelCase = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowerCamelCase = logging.get_logger(__name__) class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = '''maskformer''' UpperCamelCase = {'''hidden_size''': '''mask_feature_size'''} UpperCamelCase = ['''resnet''', '''swin'''] UpperCamelCase = ['''detr'''] def __init__( self : Optional[int] , _UpperCAmelCase : int = 256 , _UpperCAmelCase : int = 256 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 20.0 , _UpperCAmelCase : Optional[bool] = None , **_UpperCAmelCase : Dict , ) -> Union[str, Any]: '''simple docstring''' if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k UpperCAmelCase_ = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase_ = backbone_config.pop("model_type" ) UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
""" F"""Supported model types: {','.join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 UpperCAmelCase_ = DetrConfig() else: # verify that the decoder is supported UpperCAmelCase_ = ( decoder_config.pop("model_type" ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F"""Transformer Decoder {decoder_type} not supported, please use one of""" F""" {','.join(self.decoders_supported )}""" ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase_ = CONFIG_MAPPING[decoder_type] UpperCAmelCase_ = config_class.from_dict(_UpperCAmelCase ) UpperCAmelCase_ = backbone_config UpperCAmelCase_ = decoder_config # main feature dimension for the model UpperCAmelCase_ = fpn_feature_size UpperCAmelCase_ = mask_feature_size # initializer UpperCAmelCase_ = init_std UpperCAmelCase_ = init_xavier_std # Hungarian matcher && loss UpperCAmelCase_ = cross_entropy_weight UpperCAmelCase_ = dice_weight UpperCAmelCase_ = mask_weight UpperCAmelCase_ = use_auxiliary_loss UpperCAmelCase_ = no_object_weight UpperCAmelCase_ = output_auxiliary_logits UpperCAmelCase_ = self.decoder_config.encoder_attention_heads UpperCAmelCase_ = self.decoder_config.num_hidden_layers super().__init__(**_UpperCAmelCase ) @classmethod def lowercase__ ( cls : Any , _UpperCAmelCase : PretrainedConfig , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : List[Any] ) -> Optional[int]: '''simple docstring''' return cls( backbone_config=_UpperCAmelCase , decoder_config=_UpperCAmelCase , **_UpperCAmelCase , ) def lowercase__ ( self : List[str] ) -> Dict[str, any]: '''simple docstring''' UpperCAmelCase_ = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ = self.backbone_config.to_dict() UpperCAmelCase_ = self.decoder_config.to_dict() UpperCAmelCase_ = self.__class__.model_type return output
code_codestyle: 370
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = '''rwkv''' UpperCamelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self : str , _UpperCAmelCase : int=50277 , _UpperCAmelCase : Optional[Any]=1024 , _UpperCAmelCase : str=4096 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=1e-5 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=6 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Optional[Any] , ) -> str: '''simple docstring''' UpperCAmelCase_ = vocab_size UpperCAmelCase_ = context_length UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCAmelCase_ = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCAmelCase_ = layer_norm_epsilon UpperCAmelCase_ = rescale_every UpperCAmelCase_ = use_cache UpperCAmelCase_ = bos_token_id UpperCAmelCase_ = eos_token_id super().__init__( tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
style_context_codestyle: 241
label: 0
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every permutation of three elements.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Optimized: sort once, then scan with the two-pointer technique.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
code_codestyle: 75
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration snake_case_ : Union[str, Any] = 50_00_00 snake_case_ ,snake_case_ : Optional[int] = os.path.split(__file__) snake_case_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : datasets.Dataset, **SCREAMING_SNAKE_CASE__ : Dict ) -> str: UpperCAmelCase_ : List[str] = dataset.map(**SCREAMING_SNAKE_CASE__ ) @get_duration def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : datasets.Dataset, **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: UpperCAmelCase_ : Optional[int] = dataset.filter(**SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> Any: UpperCAmelCase_ : List[str] = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Optional[int] = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) UpperCAmelCase_ : Dict = generate_example_dataset( os.path.join(SCREAMING_SNAKE_CASE__, '''dataset.arrow''' ), SCREAMING_SNAKE_CASE__, num_examples=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=SCREAMING_SNAKE_CASE__ ) def tokenize(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return tokenizer(examples['''text'''] ) UpperCAmelCase_ : List[str] = map(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = map(SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''numpy''' ): UpperCAmelCase_ : Dict = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''pandas''' ): UpperCAmelCase_ : Union[str, Any] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''torch''', columns='''numbers''' ): UpperCAmelCase_ : Optional[int] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ): UpperCAmelCase_ : Optional[Any] = map(SCREAMING_SNAKE_CASE__, function=lambda SCREAMING_SNAKE_CASE__ : None, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = map(SCREAMING_SNAKE_CASE__, function=SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = filter(SCREAMING_SNAKE_CASE__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(SCREAMING_SNAKE_CASE__, '''wb''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE__ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
style_context_codestyle: 125
label: 0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def _snake_case ( A , A=False ) -> Optional[Any]: try: lowerCAmelCase__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCAmelCase__ = default else: # KEY is set, convert it to True or False. try: lowerCAmelCase__ = strtobool(snake_case__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value __UpperCAmelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def _snake_case ( A ) -> str: return unittest.skip('''Test was skipped''' )(snake_case__ ) def _snake_case ( A ) -> Optional[Any]: return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(snake_case__ ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(snake_case__ ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(snake_case__ ) def _snake_case ( A ) -> List[Any]: return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(snake_case__ ) def _snake_case ( A ) -> List[Any]: return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(snake_case__ ) def _snake_case ( A ) -> Tuple: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(snake_case__ ) def _snake_case ( A ) -> List[Any]: return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(snake_case__ ) def _snake_case ( A ) -> List[str]: return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(snake_case__ ) def _snake_case ( A ) -> Optional[int]: return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(snake_case__ ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(snake_case__ ) def _snake_case ( A ) -> Any: return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(snake_case__ ) def _snake_case ( A ) -> int: return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(snake_case__ ) def _snake_case ( A ) -> Any: return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(snake_case__ ) def _snake_case ( A ) -> Dict: return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(snake_case__ ) def _snake_case ( A ) -> Optional[Any]: return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(snake_case__ ) def _snake_case ( A=None , A=None ) -> Optional[Any]: if test_case is None: return partial(snake_case__ , version=snake_case__ ) return unittest.skipUnless(is_torch_version('''>=''' , snake_case__ ) , F"""test requires torch 
version >= {version}""" )(snake_case__ ) def _snake_case ( A ) -> Optional[Any]: return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(snake_case__ ) def _snake_case ( A ) -> Tuple: return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(snake_case__ ) def _snake_case ( A ) -> List[Any]: return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(snake_case__ ) __UpperCAmelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def _snake_case ( A ) -> Optional[Any]: return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(snake_case__ ) class a__ ( unittest.TestCase ): '''simple docstring''' lowercase__ : Optional[Any] = True @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> Any: lowerCAmelCase__ = tempfile.mkdtemp() @classmethod def __SCREAMING_SNAKE_CASE ( cls ) -> Tuple: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __SCREAMING_SNAKE_CASE ( self ) -> str: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(SCREAMING_SNAKE_CASE_ ) class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class a__ ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple: lowerCAmelCase__ = mocks if isinstance(SCREAMING_SNAKE_CASE_ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def _snake_case ( A ) -> Optional[int]: lowerCAmelCase__ = AcceleratorState() lowerCAmelCase__ = tensor[None].clone().to(state.device ) lowerCAmelCase__ = gather(snake_case__ ).cpu() lowerCAmelCase__ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , snake_case__ ): return False return True class a__ : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int: lowerCAmelCase__ = returncode lowerCAmelCase__ = stdout lowerCAmelCase__ = stderr async def _snake_case ( A , A ) -> Union[str, Any]: while True: lowerCAmelCase__ = await stream.readline() if line: callback(snake_case__ ) else: break async def _snake_case ( A , A=None , A=None , A=None , A=False , A=False ) -> _RunOutput: if echo: print('''\nRunning: ''' , ''' '''.join(snake_case__ ) ) lowerCAmelCase__ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=snake_case__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCAmelCase__ = [] lowerCAmelCase__ = [] def tee(A , A , A , A="" ): lowerCAmelCase__ = line.decode('''utf-8''' ).rstrip() sink.append(snake_case__ ) if not quiet: print(snake_case__ , snake_case__ , file=snake_case__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda A : tee(snake_case__ , snake_case__ , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda A : tee(snake_case__ , snake_case__ , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=snake_case__ , ) return _RunOutput(await p.wait() , snake_case__ , snake_case__ ) def _snake_case ( A , A=None , A=None , A=180 , A=False , A=True ) -> _RunOutput: lowerCAmelCase__ = asyncio.get_event_loop() lowerCAmelCase__ = loop.run_until_complete( _stream_subprocess(snake_case__ , env=snake_case__ , stdin=snake_case__ , timeout=snake_case__ , quiet=snake_case__ , echo=snake_case__ ) ) lowerCAmelCase__ = ' '.join(snake_case__ ) if result.returncode > 0: lowerCAmelCase__ = '\n'.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class a__ ( a__ ): '''simple docstring''' pass def _snake_case ( A , A=False ) -> int: try: lowerCAmelCase__ = subprocess.check_output(snake_case__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(snake_case__ , '''decode''' ): lowerCAmelCase__ = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{" ".join(snake_case__ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
code_codestyle: 365
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
style_context_codestyle: 228
label: 0
import os import jsonlines import numpy as np from tqdm import tqdm A__ = 2048 A__ = 4096 A__ = 42 A__ = os.environ.pop('''PROCESS_TRAIN''', '''false''') A__ = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def _lowerCAmelCase ( __lowerCAmelCase ) -> Tuple: """simple docstring""" def choose_first(__lowerCAmelCase , __lowerCAmelCase=False ): assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: snake_case__ : Tuple = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: snake_case__ : Optional[Any] = {k: [a[k]] for k in a} if len(a['''start_token'''] ) > 0: break return a snake_case__ : List[str] = {'''id''': example['''id''']} snake_case__ : str = example['''annotations'''] snake_case__ : List[str] = annotation['''yes_no_answer'''] if 0 in yes_no_answer or 1 in yes_no_answer: snake_case__ : Union[str, Any] = ['''yes'''] if 1 in yes_no_answer else ['''no'''] snake_case__ : Tuple = [] snake_case__ : Optional[Any] = [] snake_case__ : Dict = ['''<cls>'''] else: snake_case__ : List[Any] = ['''short'''] snake_case__ : int = choose_first(annotation['''short_answers'''] ) if len(out['''start_token'''] ) == 0: # answer will be long if short is not available snake_case__ : Any = ['''long'''] snake_case__ : Optional[int] = choose_first(annotation['''long_answer'''] , is_long_answer=__lowerCAmelCase ) snake_case__ : List[Any] = [] answer.update(__lowerCAmelCase ) # disregard some samples if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]: snake_case__ : List[Any] = True else: snake_case__ : List[Any] = False snake_case__ : List[str] = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text'''] if not all(isinstance(answer[k] , __lowerCAmelCase ) for k in cols ): raise ValueError('''Issue in ID''' , example['''id'''] ) return answer def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=False ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[Any] = _get_single_answer(__lowerCAmelCase ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case__ : List[Any] = example['''document''']['''tokens'''] snake_case__ : Union[str, Any] = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) return { "context": " ".join(__lowerCAmelCase ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples snake_case__ : int = ['''start_token''', '''end_token'''] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 snake_case__ : Optional[int] = example['''document''']['''tokens'''] snake_case__ : str = answer['''start_token'''] snake_case__ : int = answer['''end_token'''] snake_case__ : List[str] = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 snake_case__ : Optional[Any] = ''' '''.join(context[start_token:end_token] ) # checking above code if assertion: snake_case__ : str = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']] snake_case__ : str = doc['''token'''][answer['''start_token'''] : answer['''end_token''']] snake_case__ : Optional[Any] = ''' '''.join([old[i] for i in range(len(__lowerCAmelCase ) ) if not is_html[i]] ) if new != old: print('''ID:''' , example['''id'''] ) print('''New:''' , __lowerCAmelCase , end='''\n''' ) print('''Old:''' , __lowerCAmelCase , end='''\n\n''' ) return { "context": " ".join(__lowerCAmelCase ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=2048 , __lowerCAmelCase=4096 , __lowerCAmelCase=True ) -> int: """simple docstring""" snake_case__ : int = get_context_and_ans(__lowerCAmelCase , assertion=__lowerCAmelCase ) snake_case__ : List[Any] = out['''answer'''] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } snake_case__ : Optional[Any] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids snake_case__ : Union[str, Any] = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case__ : Optional[Any] = [] snake_case__ : Optional[Any] = [] snake_case__ : int = input_ids[:q_len] snake_case__ : Union[str, Any] = range(__lowerCAmelCase , len(__lowerCAmelCase ) , max_length - doc_stride ) for i in doc_start_indices: snake_case__ : Tuple = i + max_length - q_len snake_case__ : Any = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['''category'''][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(__lowerCAmelCase ), "end_token": [-100] * len(__lowerCAmelCase ), "category": category, }, } snake_case__ : Optional[int] = out['''context'''].split() snake_case__ : Union[str, Any] = splitted_context[answer['''end_token''']] snake_case__ : str = len( tokenizer( ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=__lowerCAmelCase , ).input_ids ) snake_case__ : Tuple = len( tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=__lowerCAmelCase ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token snake_case__ : Dict = len(tokenizer(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 snake_case__ : Optional[Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive snake_case__ : Optional[Any] = answer['''start_token'''] snake_case__ : str = answer['''end_token'''] if assertion: 
snake_case__ : int = tokenizer.decode(__lowerCAmelCase ) if answer["span"] != new: print('''ISSUE IN TOKENIZATION''' ) print('''OLD:''' , answer['''span'''] ) print('''NEW:''' , __lowerCAmelCase , end='''\n\n''' ) if len(__lowerCAmelCase ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } snake_case__ : List[Any] = input_ids[:q_len] snake_case__ : str = range(__lowerCAmelCase , len(__lowerCAmelCase ) , max_length - doc_stride ) snake_case__ : List[str] = [] snake_case__ : str = [] snake_case__ : int = [] snake_case__ : str = [] # null, yes, no, long, short for i in doc_start_indices: snake_case__ : List[Any] = i + max_length - q_len snake_case__ : Tuple = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: snake_case__ : Dict = start_token - i + q_len snake_case__ : Union[str, Any] = end_token - i + q_len answers_category.append(answer['''category'''][0] ) # ["short"] -> "short" else: snake_case__ : Optional[Any] = -100 snake_case__ : Optional[int] = -100 answers_category.append('''null''' ) snake_case__ : str = inputs[-1][start_token : end_token + 1] answers_start_token.append(__lowerCAmelCase ) answers_end_token.append(__lowerCAmelCase ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('''ISSUE in strided for ID:''' , example['''id'''] ) print('''New:''' , tokenizer.decode(__lowerCAmelCase ) ) print('''Old:''' , tokenizer.decode(__lowerCAmelCase ) , end='''\n\n''' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=2048 , __lowerCAmelCase=4096 , __lowerCAmelCase=False ) -> int: """simple docstring""" snake_case__ : List[Any] = get_strided_contexts_and_ans( __lowerCAmelCase , __lowerCAmelCase , doc_stride=__lowerCAmelCase , max_length=__lowerCAmelCase , assertion=__lowerCAmelCase , ) return example def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" with jsonlines.open(__lowerCAmelCase , '''a''' ) as writer: for example in tqdm(__lowerCAmelCase , total=len(__lowerCAmelCase ) , desc='''Saving samples ... 
''' ): snake_case__ : Any = example['''labels'''] for ids, start, end, cat in zip( example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { '''input_ids''': ids, '''start_token''': start, '''end_token''': end, '''category''': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer A__ = load_dataset('''natural_questions''') A__ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') A__ = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] A__ = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } A__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) A__ = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) A__ = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
code_codestyle: 230
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer A__ = logging.get_logger(__name__) A__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } A__ = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } A__ = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-reader''': {'''do_lower_case''': 
True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Candidates are always padded to max_length so every entry has the same shape.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
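# For reference, a minimal usage sketch of batch_encode_candidates. This is a
# sketch only: the checkpoint name follows the upstream REALM examples and is
# an assumption here, not something this file guarantees.
#
#     from transformers import RealmTokenizerFast
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     # two questions, each with two candidate texts; every candidate is padded to max_length
#     batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     tokenized = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
#     print(tokenized["input_ids"].shape)  # (batch, num_candidates, max_length)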
'''simple docstring'''
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum among all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # a subsequence need not be contiguous: keep the best so far,
        # extend it with `num`, or restart from `num`
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
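# A couple of quick sanity checks for max_subsequence_sum. The expected values
# follow from the definition above: sum every positive element, or take the
# largest single element when all are negative.
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1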
'''simple docstring'''


def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring, using Manacher's algorithm."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
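# Illustrative checks for palindromic_string, mirroring the usual doctests for
# this algorithm:
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"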
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A = """\ """ __A = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ __A = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def snake_case ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1_6 , __UpperCAmelCase = True , __UpperCAmelCase=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # exponentiate (base 2) the length-normalized cross-entropy to get perplexity
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
"""simple docstring""" from __future__ import annotations __A = 1.6_021e-19 # units = C def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
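# A minimal usage sketch, following the usual transformers config pattern.
# LiltModel is assumed to be importable from the same library; the defaults
# above correspond to the SCUT-DLVCLab/lilt-roberta-en-base architecture.
#
#     from transformers import LiltConfig, LiltModel
#
#     configuration = LiltConfig()      # default SCUT-DLVCLab/lilt-roberta-en-base style config
#     model = LiltModel(configuration)  # randomly initialized model from the configuration
#     configuration = model.config      # access the model configuration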
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> Any: '''simple docstring''' UpperCAmelCase__ = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bza_file, BzipaExtractor), "gzip": (gz_file, GzipExtractor), "lz4": (lza_file, LzaExtractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } UpperCAmelCase__ , UpperCAmelCase__ = input_paths_and_base_extractors[compression_format] if input_path is None: UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__A ) assert base_extractor.is_extractable(__A ) UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(__A, __A ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase__ = file_path.read_text(encoding="utf-8" ) else: UpperCAmelCase__ = output_path.read_text(encoding="utf-8" ) UpperCAmelCase__ = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive", [ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ], ) def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ = { "7z": seven_zip_file, "bz2": bza_file, "gzip": gz_file, "lz4": lza_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } UpperCAmelCase__ = input_paths[compression_format] if input_path is None: UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """ if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__A ) UpperCAmelCase__ = Extractor.infer_extractor_format(__A ) assert extractor_format is not None UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(__A, __A, __A ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name UpperCAmelCase__ = file_path.read_text(encoding="utf-8" ) else: UpperCAmelCase__ = output_path.read_text(encoding="utf-8" ) UpperCAmelCase__ = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.fixture def lowerCAmelCase_ ( __A, __A ) -> List[str]: '''simple docstring''' import tarfile UpperCAmelCase__ = tmp_path / "data_dot_dot" directory.mkdir() UpperCAmelCase__ = directory / "tar_file_with_dot_dot.tar" with tarfile.TarFile(__A, "w" ) as f: f.add(__A, 
arcname=os.path.join("..", text_file.name ) ) return path @pytest.fixture def lowerCAmelCase_ ( __A ) -> Dict: '''simple docstring''' import tarfile UpperCAmelCase__ = tmp_path / "data_sym_link" directory.mkdir() UpperCAmelCase__ = directory / "tar_file_with_sym_link.tar" os.symlink("..", directory / "subdir", target_is_directory=__A ) with tarfile.TarFile(__A, "w" ) as f: f.add(str(directory / "subdir" ), arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], ) def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A ) -> Dict: '''simple docstring''' UpperCAmelCase__ = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } UpperCAmelCase__ = insecure_tar_files[insecure_tar_file] UpperCAmelCase__ = tmp_path / "extracted" TarExtractor.extract(__A, __A ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' UpperCAmelCase__ = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 UpperCAmelCase__ = ( B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb" ) as f: f.write(__A ) assert zipfile.is_zipfile(str(__A ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__A ) # but we're right
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power, solve for the third."""
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system'
        )
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
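# Exactly one of the three arguments must be 0; the function solves for it.
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)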
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase ) else: __a = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase ) for i, tensor in enumerate(_UpperCAmelCase ): if padding_side == "right": if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __a = tensor[:sequence_length] else: __a = tensor[:sequence_length] return out_tensor.tolist() def __snake_case ( _UpperCAmelCase ): __a = ord(_UpperCAmelCase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True __a = unicodedata.category(_UpperCAmelCase ) if cat.startswith('''P''' ): return True return False @dataclass class _A ( __UpperCAmelCase ): UpperCamelCase__ : PreTrainedTokenizerBase UpperCamelCase__ : Union[bool, str, PaddingStrategy] = True UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : int = -100 UpperCamelCase__ : str = "pt" def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]): '''simple docstring''' import torch __a = '''label''' if '''label''' in features[0].keys() else '''labels''' __a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __a = self.tokenizer.pad( __SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __a = torch.tensor(batch['''entity_ids''']).shape[1] __a = self.tokenizer.padding_side if padding_side == "right": __a = [ list(__SCREAMING_SNAKE_CASE) + [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) for label in labels ] else: __a = [ [self.label_pad_token_id] * (sequence_length - len(__SCREAMING_SNAKE_CASE)) + list(__SCREAMING_SNAKE_CASE) for label in labels ] __a = [feature['''ner_tags'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , -1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = [feature['''original_entity_spans'''] for feature in features] __a = padding_tensor(__SCREAMING_SNAKE_CASE , (-1, -1) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = {k: torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa) for k, v in batch.items()} return batch
'''simple docstring''' import collections import os import re from pathlib import Path UpperCAmelCase_ = "src/transformers" # Matches is_xxx_available() UpperCAmelCase_ = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ = re.compile(r'\s+\"\S*\":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available UpperCAmelCase_ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ = re.compile(r'^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ = re.compile(r'^\s+\"([^\"]+)\",') # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: UpperCAmelCase_ = re.compile(r'^\s*try:') # Catches a line with else: UpperCAmelCase_ = re.compile(r'^\s*else:') def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' if _re_test_backend.search(__lowerCAmelCase ) is None: return None UpperCAmelCase__ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )] backends.sort() return "_and_".join(__lowerCAmelCase ) def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: UpperCAmelCase__ = f.readlines() UpperCAmelCase__ = 0 while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: UpperCAmelCase__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowerCAmelCase ): UpperCAmelCase__ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0] UpperCAmelCase__ = re.findall(r"""\[([^\]]+)\]""" , __lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue UpperCAmelCase__ = _re_import_struct_key_value.search(__lowerCAmelCase ) if single_line_import_search is not None: UpperCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): UpperCAmelCase__ = lines[line_index] if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None: UpperCAmelCase__ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(""", """ ) UpperCAmelCase__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_between_brackets.search(__lowerCAmelCase ) is not None: UpperCAmelCase__ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(""", """ ) UpperCAmelCase__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_quote_object.search(__lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 UpperCAmelCase__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase__ = [] while ( line_index < len(__lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): UpperCAmelCase__ = lines[line_index] UpperCAmelCase__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(__lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): UpperCAmelCase__ = lines[line_index] UpperCAmelCase__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 UpperCAmelCase__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' def find_duplicates(SCREAMING_SNAKE_CASE__ : int ): return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase__ = [] for key in import_dict_objects.keys(): UpperCAmelCase__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) UpperCAmelCase__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase__ = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: UpperCAmelCase__ = os.path.join(__lowerCAmelCase , """__init__.py""" ) UpperCAmelCase__ = parse_init(__lowerCAmelCase ) if objects is not None: UpperCAmelCase__ = analyze_results(*__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: UpperCAmelCase__ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 0: raise ValueError("""\n\n""".join(__lowerCAmelCase ) ) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = [] for path, directories, files in os.walk(__lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(__lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue UpperCAmelCase__ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) ) UpperCAmelCase__ = short_path.replace(os.path.sep , """.""" ) submodules.append(__lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase__ = str((Path(__lowerCAmelCase ) / 
fname).relative_to(__lowerCAmelCase ) ) UpperCAmelCase__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(__lowerCAmelCase ) return submodules UpperCAmelCase_ = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _UpperCamelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import UpperCAmelCase__ = direct_transformers_import(__lowerCAmelCase ) UpperCAmelCase__ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" ) as f: UpperCAmelCase__ = f.read() import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , __lowerCAmelCase ) ) ) UpperCAmelCase__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__lowerCAmelCase ) > 0: UpperCAmelCase__ = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
'''simple docstring''' import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging UpperCAmelCase_ = logging.get_logger(__name__) def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. UpperCAmelCase__ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ ) if not mpi_options.get("""sagemaker_mpi_enabled""" , SCREAMING_SNAKE_CASE__ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("""smdistributed""" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : str = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" super().__post_init__() warnings.warn( """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """ """`TrainingArguments` instead.""" , _UpperCAmelCase , ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" logger.info("""PyTorch: setting up devices""" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( """torch.distributed process group is initialized, but local_rank == -1. """ """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" ) if self.no_cuda: UpperCAmelCase__ = torch.device("""cpu""" ) UpperCAmelCase__ = 0 elif is_sagemaker_model_parallel_available(): UpperCAmelCase__ = smp.local_rank() UpperCAmelCase__ = torch.device("""cuda""" , _UpperCAmelCase ) UpperCAmelCase__ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta ) UpperCAmelCase__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) ) UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank ) UpperCAmelCase__ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 UpperCAmelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. 
UpperCAmelCase__ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta ) UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank ) UpperCAmelCase__ = 1 if device.type == "cuda": torch.cuda.set_device(_UpperCAmelCase ) return device @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return not is_sagemaker_model_parallel_available() @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" return False
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput __A : Union[str, Any] = "scheduler_config.json" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 1 UpperCamelCase__ = 2 UpperCamelCase__ = 3 UpperCamelCase__ = 4 UpperCamelCase__ = 5 @dataclass class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = 42 class _a : """simple docstring""" UpperCamelCase__ = SCHEDULER_CONFIG_NAME UpperCamelCase__ = ["""dtype"""] UpperCamelCase__ = [] UpperCamelCase__ = True @classmethod def lowercase__ ( cls : Any , __UpperCamelCase : Dict[str, Any] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[Any] , )->List[str]: _UpperCAmelCase , _UpperCAmelCase = cls.load_config( pretrained_model_name_or_path=__UpperCamelCase , subfolder=__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase , _UpperCAmelCase = cls.from_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , **__UpperCamelCase ) if hasattr(__UpperCamelCase , '''create_state''' ) and getattr(__UpperCamelCase , '''has_state''' , __UpperCamelCase ): _UpperCAmelCase = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, os.PathLike] , __UpperCamelCase : bool = False , **__UpperCamelCase : int )->Tuple: self.save_config(save_directory=__UpperCamelCase , push_to_hub=__UpperCamelCase , **__UpperCamelCase ) @property def lowercase__ ( self : Any )->int: return self._get_compatibles() @classmethod def lowercase__ ( cls : Optional[int] )->Optional[int]: _UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) ) _UpperCAmelCase = importlib.import_module(__name__.split('''.''' )[0] ) _UpperCAmelCase = [ getattr(__UpperCamelCase , __UpperCamelCase ) for c in compatible_classes_str if hasattr(__UpperCamelCase , __UpperCamelCase ) ] return compatible_classes def lowercase ( _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : Tuple[int] ): '''simple docstring''' assert len(_SCREAMING_SNAKE_CASE ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_SCREAMING_SNAKE_CASE ) - x.ndim) ) , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any=0.999 , _SCREAMING_SNAKE_CASE : Union[str, Any]=jnp.floataa ): '''simple docstring''' def alpha_bar(_SCREAMING_SNAKE_CASE : List[Any] ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 _UpperCAmelCase = [] for i in range(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i / num_diffusion_timesteps _UpperCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(_SCREAMING_SNAKE_CASE ) / alpha_bar(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return jnp.array(_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) @flax.struct.dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @classmethod def lowercase__ ( cls : Tuple , __UpperCamelCase : Optional[Any] )->List[str]: _UpperCAmelCase = scheduler.config if config.trained_betas is not None: _UpperCAmelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": _UpperCAmelCase = jnp.linspace(config.beta_start , config.beta_end , 
config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _UpperCAmelCase = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCAmelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) _UpperCAmelCase = 1.0 - betas _UpperCAmelCase = jnp.cumprod(__UpperCamelCase , axis=0 ) return cls( alphas=__UpperCamelCase , betas=__UpperCamelCase , alphas_cumprod=__UpperCamelCase , ) def lowercase ( _SCREAMING_SNAKE_CASE : CommonSchedulerState , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray ): '''simple docstring''' _UpperCAmelCase = state.alphas_cumprod _UpperCAmelCase = alphas_cumprod[timesteps] ** 0.5 _UpperCAmelCase = sqrt_alpha_prod.flatten() _UpperCAmelCase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape ) _UpperCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 _UpperCAmelCase = sqrt_one_minus_alpha_prod.flatten() _UpperCAmelCase = broadcast_to_shape_from_left(_SCREAMING_SNAKE_CASE , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def lowercase ( _SCREAMING_SNAKE_CASE : CommonSchedulerState , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def lowercase ( _SCREAMING_SNAKE_CASE : CommonSchedulerState , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray , _SCREAMING_SNAKE_CASE : jnp.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = get_sqrt_alpha_prod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
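# A minimal sketch of the cosine ("squaredcos_cap_v2") schedule helper: build a
# 1000-step beta schedule and the derived cumulative alpha products, mirroring
# what CommonSchedulerState.create does above. The helper name
# betas_for_alpha_bar is taken from its call site in this file.
#
#     import jax.numpy as jnp
#
#     betas = betas_for_alpha_bar(1000)            # max_beta defaults to 0.999
#     alphas = 1.0 - betas
#     alphas_cumprod = jnp.cumprod(alphas, axis=0)
#     print(betas.shape, float(alphas_cumprod[-1]))  # (1000,), a value very near 0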
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = 
scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for 
solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
"""simple docstring""" import requests from bsa import BeautifulSoup def __lowerCamelCase ( __UpperCamelCase = "AAPL" ) -> str: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' lowerCAmelCase_ : Tuple = BeautifulSoup(requests.get(__UpperCamelCase ).text , "html.parser" ) lowerCAmelCase_ : str = "My(6px) Pos(r) smartphone_Mt(6px)" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , a_ : List[str] , a_ : Tuple=7 , a_ : Any=3 , a_ : Union[str, Any]=18 , a_ : List[str]=30 , a_ : List[str]=4_00 , a_ : str=True , a_ : Tuple=None , a_ : str=True , a_ : Optional[int]=None , ): lowerCAmelCase_ : Any = size if size is not None else {"shortest_edge": 20} lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCAmelCase_ : int = parent lowerCAmelCase_ : Dict = batch_size lowerCAmelCase_ : Any = num_channels lowerCAmelCase_ : str = image_size lowerCAmelCase_ : int = min_resolution lowerCAmelCase_ : Tuple = max_resolution lowerCAmelCase_ : str = do_resize lowerCAmelCase_ : List[Any] = size lowerCAmelCase_ : Any = do_center_crop lowerCAmelCase_ : Tuple = crop_size def lowerCamelCase ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : int = MobileNetVaImageProcessingTester(self ) @property def lowerCamelCase ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "do_center_crop" ) ) self.assertTrue(hasattr(a_ , "crop_size" ) ) def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def lowerCamelCase ( self : Tuple ): pass def lowerCamelCase ( self : Any ): # Initialize image_processing lowerCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : str ): # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : Dict = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : str = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
"""Word error rate (WER) metric, computed with jiwer."""

import datasets
from jiwer import compute_measures


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a
different length from the reference word sequence (supposedly the correct one). The WER is derived from the
Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for
comparing different systems as well as for evaluating improvements within one system. This kind of measurement,
however, provides no details on the nature of translation errors and further work is therefore required to identify
the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence
using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states
the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N = S + D + C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system, with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
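# Worked example of the iterative branch of _compute() above (not part of the
# metric itself), using the same jiwer.compute_measures call and the strings
# from the docstring example:
from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect = 0
total = 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5, matching the docstring example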
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1_00_00_00 ) -> int: '''simple docstring''' lowercase_ = 1 lowercase_ = 1 lowercase_ = {1: 1} for inputa in range(2 , __lowerCAmelCase ): lowercase_ = 0 lowercase_ = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowercase_ = (3 * number) + 1 counter += 1 if inputa not in counters: lowercase_ = counter if counter > pre_counter: lowercase_ = inputa lowercase_ = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin a = False @skip_mps class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : int = StableDiffusionAttendAndExcitePipeline UpperCAmelCase : List[Any] = False UpperCAmelCase : Dict = TEXT_TO_IMAGE_PARAMS UpperCAmelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) UpperCAmelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def lowerCAmelCase_ ( cls : Any ): super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : List[Any] ): super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , ) _A = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , ) _A = CLIPTextModel(_UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=0 ): if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = _A = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def lowerCAmelCase_ ( self : List[Any] ): _A = 'cpu' _A = self.get_dummy_components() _A = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_dummy_inputs(_UpperCAmelCase ) _A = pipe(**_UpperCAmelCase ).images _A = image[0, -3:, -3:, -1] 
self.assertEqual(image.shape , (1, 64, 64, 3) ) _A = np.array( [0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] ) _A = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase , 1E-3 ) def lowerCAmelCase_ ( self : int ): super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def lowerCAmelCase_ ( self : Any ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase_ ( self : Any ): self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def lowerCAmelCase_ ( self : List[str] ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def lowerCAmelCase_ ( self : Any ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def lowerCAmelCase_ ( self : Dict ): super().test_save_load_local(expected_max_difference=5E-4 ) def lowerCAmelCase_ ( self : Optional[int] ): super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowercase_ ( unittest.TestCase ): '''simple docstring''' @classmethod def lowerCAmelCase_ ( cls : int ): super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : List[str] ): super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : List[Any] ): _A = torch.manual_seed(51 ) _A = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa ) pipe.to('cuda' ) _A = 'a painting of an elephant with glasses' _A = [5, 7] _A = pipe( prompt=_UpperCAmelCase , token_indices=_UpperCAmelCase , guidance_scale=7.5 , generator=_UpperCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def _snake_case ( _snake_case : int ) -> Any: '''simple docstring''' random.seed(_snake_case ) np.random.seed(_snake_case ) torch.manual_seed(_snake_case ) torch.cuda.manual_seed_all(_snake_case ) # ^^ safe to call this function even if cuda is not available class lowercase_ : '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : Iterable[torch.nn.Parameter] , _UpperCAmelCase : float = 0.9999 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[float, int] = 1.0 , _UpperCAmelCase : Union[float, int] = 2 / 3 , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : Dict[str, Any] = None , **_UpperCAmelCase : Optional[int] , ): if isinstance(_UpperCAmelCase , torch.nn.Module ): _A = ( 'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ' 'Please pass the parameters of the module instead.' ) deprecate( 'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , ) _A = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility _A = True if kwargs.get('max_value' , _UpperCAmelCase ) is not None: _A = 'The `max_value` argument is deprecated. Please use `decay` instead.' deprecate('max_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) _A = kwargs['max_value'] if kwargs.get('min_value' , _UpperCAmelCase ) is not None: _A = 'The `min_value` argument is deprecated. Please use `min_decay` instead.' deprecate('min_value' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) _A = kwargs['min_value'] _A = list(_UpperCAmelCase ) _A = [p.clone().detach() for p in parameters] if kwargs.get('device' , _UpperCAmelCase ) is not None: _A = 'The `device` argument is deprecated. Please use `to` instead.' deprecate('device' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) self.to(device=kwargs['device'] ) _A = None _A = decay _A = min_decay _A = update_after_step _A = use_ema_warmup _A = inv_gamma _A = power _A = 0 _A = None # set in `step()` _A = model_cls _A = model_config @classmethod def lowerCAmelCase_ ( cls : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ): _A , _A = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase ) _A = model_cls.from_pretrained(_UpperCAmelCase ) _A = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config ) ema_model.load_state_dict(_UpperCAmelCase ) return ema_model def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ): if self.model_cls is None: raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' ) if self.model_config is None: raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' 
) _A = self.model_cls.from_config(self.model_config ) _A = self.state_dict() state_dict.pop('shadow_params' , _UpperCAmelCase ) model.register_to_config(**_UpperCAmelCase ) self.copy_to(model.parameters() ) model.save_pretrained(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : int ): _A = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: _A = 1 - (1 + step / self.inv_gamma) ** -self.power else: _A = (1 + step) / (10 + step) _A = min(_UpperCAmelCase , self.decay ) # make sure decay is not smaller than min_decay _A = max(_UpperCAmelCase , self.min_decay ) return cur_decay_value @torch.no_grad() def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Iterable[torch.nn.Parameter] ): if isinstance(_UpperCAmelCase , torch.nn.Module ): _A = ( 'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ' 'Please pass the parameters of the module instead.' ) deprecate( 'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase , ) _A = parameters.parameters() _A = list(_UpperCAmelCase ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. _A = self.get_decay(self.optimization_step ) _A = decay _A = 1 - decay _A = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , _UpperCAmelCase ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): _A = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Iterable[torch.nn.Parameter] ): _A = list(_UpperCAmelCase ) for s_param, param in zip(self.shadow_params , _UpperCAmelCase ): param.data.copy_(s_param.to(param.device ).data ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Dict=None ): _A = [ p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase ) for p in self.shadow_params ] def lowerCAmelCase_ ( self : Dict ): return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Iterable[torch.nn.Parameter] ): _A = [param.detach().cpu().clone() for param in parameters] def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Iterable[torch.nn.Parameter] ): if self.temp_stored_params is None: raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' ) for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ): param.data.copy_(c_param.data ) # Better memory-wise. 
_A = None def lowerCAmelCase_ ( self : int , _UpperCAmelCase : dict ): _A = copy.deepcopy(_UpperCAmelCase ) _A = state_dict.get('decay' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('Decay must be between 0 and 1' ) _A = state_dict.get('min_decay' , self.min_decay ) if not isinstance(self.min_decay , _UpperCAmelCase ): raise ValueError('Invalid min_decay' ) _A = state_dict.get('optimization_step' , self.optimization_step ) if not isinstance(self.optimization_step , _UpperCAmelCase ): raise ValueError('Invalid optimization_step' ) _A = state_dict.get('update_after_step' , self.update_after_step ) if not isinstance(self.update_after_step , _UpperCAmelCase ): raise ValueError('Invalid update_after_step' ) _A = state_dict.get('use_ema_warmup' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , _UpperCAmelCase ): raise ValueError('Invalid use_ema_warmup' ) _A = state_dict.get('inv_gamma' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('Invalid inv_gamma' ) _A = state_dict.get('power' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('Invalid power' ) _A = state_dict.get('shadow_params' , _UpperCAmelCase ) if shadow_params is not None: _A = shadow_params if not isinstance(self.shadow_params , _UpperCAmelCase ): raise ValueError('shadow_params must be a list' ) if not all(isinstance(_UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ): raise ValueError('shadow_params must all be Tensors' )
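# Standalone sketch of the decay schedule implemented by get_decay() above,
# for intuition only; `step` is the already-offset optimisation step and the
# defaults mirror __init__.
def ema_decay(step, decay=0.9999, min_decay=0.0, use_ema_warmup=False,
              inv_gamma=1.0, power=2 / 3):
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur = (1 + step) / (10 + step)
    return max(min(cur, decay), min_decay)


# The warmup schedule ramps towards `decay` instead of jumping there:
# ema_decay(10, use_ema_warmup=True) ~ 0.80 vs ema_decay(10) ~ 0.55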
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _lowerCAmelCase = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,**__UpperCAmelCase ) -> Tuple: super().__init__(**__UpperCAmelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> str: return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase ) def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str: lowerCAmelCase__ : List[Any] = {} if "candidate_labels" in kwargs: lowerCAmelCase__ : int = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: lowerCAmelCase__ : Optional[int] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase="This is a photo of {}." ) -> int: lowerCAmelCase__ : str = load_image(__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.image_processor(images=[image] ,return_tensors=self.framework ) lowerCAmelCase__ : List[Any] = candidate_labels lowerCAmelCase__ : List[str] = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels] lowerCAmelCase__ : Optional[Any] = self.tokenizer(__UpperCAmelCase ,return_tensors=self.framework ,padding=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = [text_inputs] return inputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : Tuple = model_inputs.pop("""candidate_labels""" ) lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,__UpperCAmelCase ): lowerCAmelCase__ : int = text_inputs[0] else: # Batching case. 
lowerCAmelCase__ : Dict = text_inputs[0][0] lowerCAmelCase__ : Any = self.model(**__UpperCAmelCase ,**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Union[str, Any] = model_outputs.pop("""candidate_labels""" ) lowerCAmelCase__ : List[str] = model_outputs["""logits"""][0] if self.framework == "pt": lowerCAmelCase__ : List[str] = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCAmelCase__ : Optional[Any] = probs.tolist() if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Dict = [scores] elif self.framework == "tf": lowerCAmelCase__ : Any = stable_softmax(__UpperCAmelCase ,axis=-1 ) lowerCAmelCase__ : List[Any] = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) lowerCAmelCase__ : Tuple = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__UpperCAmelCase ,__UpperCAmelCase ) ,key=lambda __UpperCAmelCase : -x[0] ) ] return result
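# Typical end-to-end use of the pipeline defined above, via the transformers
# pipeline() factory; the model id and image path are illustrative
# placeholders, not taken from the file.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
result = classifier(
    "photo.jpg",
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)
# result: list of {"score": float, "label": str}, sorted by descending score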
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCamelCase : Union[str, Any] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCamelCase : List[Any] = TaTokenizerFast UpperCamelCase : List[str] = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Union[str, Any] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCamelCase : Optional[int] = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A ( snake_case :List[Any] , snake_case :Dict=1 ) -> Optional[int]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def A ( snake_case :Dict , snake_case :int=0 ) -> Optional[int]: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item.replace('in_layers.0' , 'norm1' ) __UpperCamelCase = new_item.replace('in_layers.2' , 'conv1' ) __UpperCamelCase = new_item.replace('out_layers.0' , 'norm2' ) __UpperCamelCase = new_item.replace('out_layers.3' , 'conv2' ) __UpperCamelCase = new_item.replace('emb_layers.1' , 'time_emb_proj' ) __UpperCamelCase = new_item.replace('skip_connection' , 'conv_shortcut' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :Optional[Any] , snake_case :Tuple=0 ) -> Tuple: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item __UpperCamelCase = new_item.replace('norm.weight' , 'group_norm.weight' ) __UpperCamelCase = new_item.replace('norm.bias' , 'group_norm.bias' ) __UpperCamelCase = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) __UpperCamelCase = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :int , snake_case :List[str] , snake_case :List[str] , snake_case :Any=None , snake_case :Optional[int]=None , snake_case :Union[str, Any]=None ) -> Optional[int]: assert isinstance(snake_case , snake_case ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __UpperCamelCase = old_checkpoint[path] __UpperCamelCase = old_tensor.shape[0] // 3 __UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __UpperCamelCase = old_tensor.shape[0] // config['num_head_channels'] // 3 __UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 ) __UpperCamelCase = query.reshape(snake_case ) __UpperCamelCase = key.reshape(snake_case ) __UpperCamelCase = value.reshape(snake_case ) for path in paths: __UpperCamelCase = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __UpperCamelCase = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) __UpperCamelCase = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) __UpperCamelCase = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: __UpperCamelCase = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __UpperCamelCase = old_checkpoint[path['old']][:, :, 0] else: __UpperCamelCase = old_checkpoint[path['old']] def A ( snake_case :Optional[Any] , snake_case :Dict ) -> Optional[Any]: __UpperCamelCase = {} __UpperCamelCase = checkpoint['time_embed.0.weight'] __UpperCamelCase = checkpoint['time_embed.0.bias'] __UpperCamelCase = checkpoint['time_embed.2.weight'] __UpperCamelCase = checkpoint['time_embed.2.bias'] __UpperCamelCase = checkpoint['input_blocks.0.0.weight'] __UpperCamelCase = checkpoint['input_blocks.0.0.bias'] __UpperCamelCase = checkpoint['out.0.weight'] __UpperCamelCase = checkpoint['out.0.bias'] __UpperCamelCase = checkpoint['out.2.weight'] __UpperCamelCase = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the middle blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the output blocks only __UpperCamelCase = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } for i in range(1 , snake_case ): __UpperCamelCase = (i - 1) // (config['num_res_blocks'] + 1) __UpperCamelCase = (i - 1) % (config['num_res_blocks'] + 1) __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.weight' ] __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} __UpperCamelCase = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path, resnet_op] , config=snake_case ) if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=snake_case , config=snake_case , ) __UpperCamelCase = middle_blocks[0] __UpperCamelCase = middle_blocks[1] __UpperCamelCase = middle_blocks[2] __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , attention_paths_to_split=snake_case , config=snake_case ) for i in range(snake_case ): __UpperCamelCase = i // (config['num_res_blocks'] + 1) __UpperCamelCase = i % (config['num_res_blocks'] + 1) __UpperCamelCase = [shave_segments(snake_case , 2 ) for name in output_blocks[i]] __UpperCamelCase = {} for layer in output_block_layers: __UpperCamelCase , __UpperCamelCase = layer.split('.' 
)[0], shave_segments(snake_case , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case ) else: __UpperCamelCase = [layer_name] if len(snake_case ) > 1: __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __UpperCamelCase = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(snake_case ) == 2: __UpperCamelCase = [] if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case , ) else: __UpperCamelCase = renew_resnet_paths(snake_case , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __UpperCamelCase = '.'.join(['output_blocks', str(snake_case ), path['old']] ) __UpperCamelCase = '.'.join(['up_blocks', str(snake_case ), 'resnets', str(snake_case ), path['new']] ) __UpperCamelCase = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : Optional[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase : int = json.loads(f.read()) UpperCamelCase : Dict = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase : Optional[Any] = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase : List[Any] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Dict = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
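# For intuition: the attention-path handling in the assignment helper above
# splits a fused qkv tensor per head before renaming.  A toy illustration with
# hypothetical shapes (a config where num_head_channels = channels // num_heads):
import torch

num_heads, channels = 2, 8
fused = torch.randn(3 * channels, channels, 1)  # conv1d-style fused qkv weight
per_head = fused.reshape((num_heads, 3 * channels // num_heads) + fused.shape[1:])
query, key, value = per_head.split(channels // num_heads, dim=1)
print(query.reshape(-1, channels).shape)  # torch.Size([8, 8])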
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( lowerCAmelCase__ ): UpperCamelCase__ : Any =(DDPMParallelScheduler,) def lowerCamelCase ( self : Any , **lowercase_ : List[str] ) -> int: """simple docstring""" _lowerCamelCase : int ={ 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**__a ) return config def lowerCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def lowerCamelCase ( self : int ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def lowerCamelCase ( self : Optional[Any] ) -> str: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__a ) def lowerCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def lowerCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def lowerCamelCase ( self : str ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def lowerCamelCase ( self : Any ) -> List[Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=__a ) def lowerCamelCase ( self : str ) -> int: """simple docstring""" _lowerCamelCase : Union[str, Any] =self.scheduler_classes[0] _lowerCamelCase : int =self.get_scheduler_config() _lowerCamelCase : Any =scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def lowerCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _lowerCamelCase : Any =self.scheduler_classes[0] _lowerCamelCase : Any =self.get_scheduler_config() _lowerCamelCase : str =scheduler_class(**__a ) _lowerCamelCase : Dict =len(__a ) _lowerCamelCase : Union[str, Any] =self.dummy_model() _lowerCamelCase : int =self.dummy_sample_deter _lowerCamelCase : str =self.dummy_sample_deter + 0.1 _lowerCamelCase : List[str] =self.dummy_sample_deter - 0.1 _lowerCamelCase : List[str] =samplea.shape[0] _lowerCamelCase : List[str] =torch.stack([samplea, samplea, samplea] , dim=0 ) _lowerCamelCase : List[str] =torch.arange(__a )[0:3, None].repeat(1 , __a ) _lowerCamelCase : Union[str, Any] =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _lowerCamelCase : Dict =scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _lowerCamelCase : str =torch.sum(torch.abs(__a ) ) _lowerCamelCase : Optional[Any] =torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def 
lowerCamelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : List[str] =self.scheduler_classes[0] _lowerCamelCase : Optional[int] =self.get_scheduler_config() _lowerCamelCase : List[str] =scheduler_class(**__a ) _lowerCamelCase : List[Any] =len(__a ) _lowerCamelCase : List[Any] =self.dummy_model() _lowerCamelCase : Union[str, Any] =self.dummy_sample_deter _lowerCamelCase : Optional[int] =torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual _lowerCamelCase : int =model(__a , __a ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] =scheduler.step(__a , __a , __a , generator=__a ).prev_sample _lowerCamelCase : Optional[Any] =pred_prev_sample _lowerCamelCase : str =torch.sum(torch.abs(__a ) ) _lowerCamelCase : Optional[Any] =torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def lowerCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : int =self.scheduler_classes[0] _lowerCamelCase : Union[str, Any] =self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCamelCase : Any =scheduler_class(**__a ) _lowerCamelCase : Any =len(__a ) _lowerCamelCase : List[Any] =self.dummy_model() _lowerCamelCase : Any =self.dummy_sample_deter _lowerCamelCase : Optional[int] =torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual _lowerCamelCase : List[str] =model(__a , __a ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : Any =scheduler.step(__a , __a , __a , generator=__a ).prev_sample _lowerCamelCase : str =pred_prev_sample _lowerCamelCase : List[str] =torch.sum(torch.abs(__a ) ) _lowerCamelCase : Dict =torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def lowerCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" _lowerCamelCase : Union[str, Any] =self.scheduler_classes[0] _lowerCamelCase : str =self.get_scheduler_config() _lowerCamelCase : List[Any] =scheduler_class(**__a ) _lowerCamelCase : Tuple =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__a ) _lowerCamelCase : Dict =scheduler.timesteps for i, timestep in enumerate(__a ): if i == len(__a ) - 1: _lowerCamelCase : Any =-1 else: _lowerCamelCase : str =timesteps[i + 1] _lowerCamelCase : Union[str, Any] =scheduler.previous_timestep(__a ) _lowerCamelCase : List[Any] =prev_t.item() self.assertEqual(__a , __a ) def lowerCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : int =self.scheduler_classes[0] _lowerCamelCase : Dict =self.get_scheduler_config() _lowerCamelCase : str =scheduler_class(**__a ) _lowerCamelCase : Dict =[100, 87, 50, 51, 0] with self.assertRaises(__a , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=__a ) def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : str =self.scheduler_classes[0] _lowerCamelCase : List[Any] =self.get_scheduler_config() _lowerCamelCase : Any =scheduler_class(**__a ) _lowerCamelCase : Optional[Any] =[100, 87, 50, 1, 0] _lowerCamelCase : Optional[Any] =len(__a ) with self.assertRaises(__a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a ) def lowerCamelCase ( self : str ) -> List[Any]: """simple docstring""" _lowerCamelCase : str =self.scheduler_classes[0] _lowerCamelCase : Optional[int] =self.get_scheduler_config() _lowerCamelCase : Optional[Any] =scheduler_class(**__a ) _lowerCamelCase : Any =[scheduler.config.num_train_timesteps] with self.assertRaises( __a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=__a )
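# Behaviour pinned down by the custom-timestep tests above, as a standalone
# sketch: previous_timestep() walks the user-supplied schedule in order and
# ends at -1 for the final step.
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
for t in scheduler.timesteps:
    print(int(t), "->", int(scheduler.previous_timestep(t)))
# 100 -> 87, 87 -> 50, 50 -> 1, 1 -> 0, 0 -> -1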
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""] __UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor""" __UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""") def __init__( self , __a=None , __a=None , **__a ): __lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) __lowerCAmelCase = kwargs.pop("feature_extractor" ) __lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self , __a=None , __a=None , __a=None , **__a ): if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: __lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a ) if images is not None: __lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: __lowerCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def snake_case ( self , *__a , **__a ): return self.tokenizer.batch_decode(*__a , **__a ) def snake_case ( self , *__a , **__a ): return self.tokenizer.decode(*__a , **__a ) @property def snake_case ( self ): __lowerCAmelCase = self.tokenizer.model_input_names __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: UpperCAmelCase__ = None UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } UpperCAmelCase__ = { "google/fnet-base": 512, "google/fnet-large": 512, } UpperCAmelCase__ = "▁" class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ['''input_ids''', '''token_type_ids'''] __snake_case = FNetTokenizer def __init__( self : List[str] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Dict="<unk>" , __UpperCAmelCase : Optional[Any]="[SEP]" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Optional[int]="[CLS]" , __UpperCAmelCase : Any="[MASK]" , **__UpperCAmelCase : Tuple , ) ->List[Any]: """simple docstring""" a = ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) a = do_lower_case a = remove_space a = keep_accents a = vocab_file a = False if not self.vocab_file else True def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): 
copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
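# Layout produced by the two pair-handling helpers above, spelled out with
# placeholder ids (the real cls/sep ids come from the FNet vocabulary):
cls_id, sep_id = 101, 102  # hypothetical values
a_ids, b_ids = [7, 8], [9]

input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)

assert len(input_ids) == len(token_type_ids)  # [CLS] A [SEP] B [SEP]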
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5") def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]: hf_model.apply_weight_norm() a = checkpoint['''input_conv.weight_g'''] a = checkpoint['''input_conv.weight_v'''] a = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): a = checkpoint[F"""upsamples.{i}.1.weight_g"""] a = checkpoint[F"""upsamples.{i}.1.weight_v"""] a = checkpoint[F"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""] a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""] a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""] a = checkpoint['''output_conv.1.weight_g'''] a = checkpoint['''output_conv.1.weight_v'''] a = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int: if config_path is not None: a = SpeechTaHifiGanConfig.from_pretrained(a ) else: a = SpeechTaHifiGanConfig() a = SpeechTaHifiGan(a ) a = torch.load(a ) load_weights(orig_checkpoint['''model''']['''generator'''] , a , a ) a = np.load(a ) a = stats[0].reshape(-1 ) a = stats[1].reshape(-1 ) a = torch.from_numpy(a ).float() a = torch.from_numpy(a ).float() model.save_pretrained(a ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(a ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) UpperCAmelCase__ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
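# Background for the weight_g / weight_v copies above: after
# hf_model.apply_weight_norm(), each conv weight is reparameterised as
# W = g * V / ||V||, exposing `weight_g` and `weight_v` parameters that the
# original checkpoint stores directly.  Minimal illustration:
import torch
from torch.nn.utils import weight_norm

conv = weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3))
print(hasattr(conv, "weight_g"), hasattr(conv, "weight_v"))  # True True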
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } __A = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> int: """simple docstring""" __lowerCamelCase = {} with open(UpperCamelCase__ , 'r' ) as file: for line_number, line in enumerate(UpperCamelCase__ ): __lowerCamelCase = line.strip() if line: __lowerCamelCase = line.split() __lowerCamelCase = line_number __lowerCamelCase = words[0] __lowerCamelCase = value return result def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> Tuple: """simple docstring""" for attribute in key.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCamelCase__ ): __lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]] __lowerCamelCase = 'param' if weight_type is not None and weight_type != "param": __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape elif weight_type is not None and weight_type == "param": __lowerCamelCase = hf_pointer for attribute in hf_param_name.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = shape_pointer.shape # let's reduce dimension __lowerCamelCase = value[0] else: __lowerCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = value else: __lowerCamelCase = value logger.info(F"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCamelCase__ ): __lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]] __lowerCamelCase = 'param' if weight_type is not None and weight_type != "param": __lowerCamelCase = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCamelCase = '.'.join([key, hf_param_name] ) else: __lowerCamelCase = key __lowerCamelCase = value if 'lm_head' in full_key else value[0] __A = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = False for key, mapped_key in MAPPING.items(): __lowerCamelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2] __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCamelCase = 'weight_g' elif "weight_v" in name: __lowerCamelCase = 'weight_v' elif "bias" in name: __lowerCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase = 'weight' else: __lowerCamelCase = None if hf_dict is not None: rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return is_used return is_used def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase = True else: __lowerCamelCase = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> Any: """simple docstring""" __lowerCamelCase = full_name.split('conv_layers.' )[-1] __lowerCamelCase = name.split('.' 
) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : int=True , UpperCamelCase__ : str=False ) -> str: """simple docstring""" if config_path is not None: __lowerCamelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCamelCase = WavaVecaConfig() if is_seq_class: __lowerCamelCase = read_txt_into_dict(UpperCamelCase__ ) __lowerCamelCase = idalabel __lowerCamelCase = WavaVecaForSequenceClassification(UpperCamelCase__ ) __lowerCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ) feature_extractor.save_pretrained(UpperCamelCase__ ) elif is_finetuned: if dict_path: __lowerCamelCase = Dictionary.load(UpperCamelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCamelCase = target_dict.pad_index __lowerCamelCase = target_dict.bos_index __lowerCamelCase = target_dict.eos_index __lowerCamelCase = len(target_dict.symbols ) __lowerCamelCase = os.path.join(UpperCamelCase__ , 'vocab.json' ) if not os.path.isdir(UpperCamelCase__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCamelCase__ ) ) return os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) __lowerCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCamelCase = 0 __lowerCamelCase = 1 with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = WavaVecaCTCTokenizer( UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , 
bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCamelCase__ , ) __lowerCamelCase = True if config.feat_extract_norm == 'layer' else False __lowerCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , ) __lowerCamelCase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) __lowerCamelCase = WavaVecaForCTC(UpperCamelCase__ ) else: __lowerCamelCase = WavaVecaForPreTraining(UpperCamelCase__ ) if is_finetuned or is_seq_class: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __lowerCamelCase = argparse.Namespace(task='audio_pretraining' ) __lowerCamelCase = fairseq.tasks.setup_task(UpperCamelCase__ ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase__ ) __lowerCamelCase = model[0].eval() recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , not is_finetuned ) hf_wavavec.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) __A = parser.parse_args() __A = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
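# A self-contained sketch of the dotted-key traversal that set_recursively above
# performs: walk getattr() one component at a time, check the shape, then assign.
# The tiny model and key below are fabricated purely for illustration.
import torch
import torch.nn as nn


def set_by_dotted_key(root: nn.Module, key: str, value: torch.Tensor) -> None:
    *parents, leaf = key.split(".")
    pointer = root
    for attr in parents:
        pointer = getattr(pointer, attr)  # descend one level per dotted component
    assert getattr(pointer, leaf).shape == value.shape, "shape mismatch"
    setattr(pointer, leaf, nn.Parameter(value))


model = nn.Sequential(nn.Linear(4, 4))
set_by_dotted_key(model, "0.weight", torch.zeros(4, 4))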
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate neighbouring (source, target) pairs until the token budget is hit."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(contents):
        return tok(contents, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
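# Illustrative use of pack_examples above. The checkpoint name "sshleifer/tiny-mbart"
# and the toy sentences are assumptions chosen only for this sketch; any tokenizer works.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-mbart")
src = ["short source a", "short source b", "short source c"]
tgt = ["short target a", "short target b", "short target c"]
packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=1024)
# Neighbouring examples are concatenated until adding one more would exceed max_tokens.
print(f"{len(src)} examples packed into {len(packed_src)}")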
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCamelCase : int = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) __UpperCamelCase : List[Any] = '''▁''' __UpperCamelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''} __UpperCamelCase : Tuple = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } __UpperCamelCase : Optional[Any] = { '''google/reformer-crime-and-punishment''': 5_2_4_2_8_8, } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self : List[Any] ,lowercase_ : List[str] ,lowercase_ : Optional[int]="</s>" ,lowercase_ : List[Any]="<unk>" ,lowercase_ : Optional[Any]=[] ,lowercase_ : Optional[Dict[str, Any]] = None ,**lowercase_ : int ,): lowerCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase_ ,unk_token=lowercase_ ,additional_special_tokens=lowercase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase_ ,) lowerCAmelCase__ : List[str] = vocab_file lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) @property def __lowerCAmelCase ( self : List[str] ): return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): lowerCAmelCase__ : str = self.__dict__.copy() lowerCAmelCase__ : Any = None return state def __setstate__( self : List[str] ,lowercase_ : Any ): lowerCAmelCase__ : Union[str, Any] = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Dict ,lowercase_ : str ): return self.sp_model.encode(lowercase_ ,out_type=lowercase_ ) def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ): return self.sp_model.piece_to_id(lowercase_ ) def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Dict ): if index < self.sp_model.get_piece_size(): lowerCAmelCase__ : List[Any] = self.sp_model.IdToPiece(lowercase_ ) return token def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token lowerCAmelCase__ : Dict = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : str ,lowercase_ : Optional[str] = None ): if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase__ : List[Any] = os.path.join( lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) 
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ ,'''wb''' ) as fi: lowerCAmelCase__ : Any = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
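# The SentencePiece round trip the tokenizer above delegates to, as a standalone
# sketch; assumes a trained model file "spiece.model" exists in the working directory.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")
pieces = sp.encode("Crime and Punishment", out_type=str)  # e.g. ['▁Crime', '▁and', ...]
ids = [sp.piece_to_id(p) for p in pieces]
print(pieces, ids)
print(sp.decode(pieces))  # reassembles the original text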
"""simple docstring""" # Algorithm for the pigeonhole sorting def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Dict: _lowerCAmelCase : Any = min(_lowerCamelCase ) # min() finds the minimum value _lowerCAmelCase : Dict = max(_lowerCamelCase ) # max() finds the maximum value _lowerCAmelCase : Tuple = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size _lowerCAmelCase : Optional[Any] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_lowerCamelCase ,_lowerCamelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. _lowerCAmelCase : Tuple = 0 for count in range(_lowerCamelCase ): while holes[count] > 0: holes[count] -= 1 _lowerCAmelCase : Optional[int] = count + min_val i += 1 def SCREAMING_SNAKE_CASE ( ) -> List[Any]: _lowerCAmelCase : List[str] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_lowerCamelCase ) print("""Sorted order is:""" ,""" """.join(_lowerCamelCase ) ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
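# The output-parity check the conversion above finishes with, reduced to a
# standalone pattern (the tensor values here are fabricated): compare by max
# absolute difference, then assert closeness within a tolerance.
import torch

our_output = torch.tensor([1.0, 2.0, 3.0])
their_output = our_output + 1e-5
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")
success = torch.allclose(our_output, their_output, atol=1e-3)
print("Do both models output the same tensors?", "🔥" if success else "💩")
if not success:
    raise Exception("Something went wRoNg")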
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
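# Illustrative wiring of the tester into a unittest case, assuming transformers'
# BertConfig is available (any PretrainedConfig subclass would do):
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def test_config(self):
        ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()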
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Any: '''simple docstring''' snake_case_ = AutoConfig.from_pretrained(lowercase_ ) snake_case_ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase_ ) snake_case_ = checkpoints.load_tax_checkpoint(lowercase_ ) snake_case_ = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": snake_case_ = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": snake_case_ = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): snake_case_ = f'''layers_{str(lowercase_ )}''' # Self-Attention snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning snake_case_ = flax_model.params["""encoder"""]["""block"""][str(lowercase_ )]["""layer"""] snake_case_ = tax_attention_key snake_case_ = tax_attention_out snake_case_ = tax_attention_query snake_case_ = tax_attention_value snake_case_ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ = tax_global_layer_norm if split_mlp_wi: snake_case_ = tax_mlp_wi_a snake_case_ = tax_mlp_wi_a else: snake_case_ = tax_mlp_wi snake_case_ = tax_mlp_wo snake_case_ = tax_mlp_layer_norm snake_case_ = flax_model_encoder_layer_block # Only for layer 0: snake_case_ = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T snake_case_ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case_ = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T snake_case_ = tax_encoder_global_rel_embedding # 
Assigning snake_case_ = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] snake_case_ = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): snake_case_ = f'''layers_{str(lowercase_ )}''' # Self-Attention snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] snake_case_ = tax_enc_dec_attention_module["""key"""]["""kernel"""] snake_case_ = tax_enc_dec_attention_module["""out"""]["""kernel"""] snake_case_ = tax_enc_dec_attention_module["""query"""]["""kernel"""] snake_case_ = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning snake_case_ = flax_model.params["""decoder"""]["""block"""][str(lowercase_ )]["""layer"""] snake_case_ = tax_attention_key snake_case_ = tax_attention_out snake_case_ = tax_attention_query snake_case_ = tax_attention_value snake_case_ = tax_pre_attention_layer_norm snake_case_ = tax_enc_dec_attention_key snake_case_ = tax_enc_dec_attention_out snake_case_ = tax_enc_dec_attention_query snake_case_ = tax_enc_dec_attention_value snake_case_ = tax_cross_layer_norm if split_mlp_wi: snake_case_ = tax_mlp_wi_a snake_case_ = tax_mlp_wi_a else: snake_case_ = tax_mlp_wi snake_case_ = tax_mlp_wo snake_case_ = txa_mlp_layer_norm snake_case_ = flax_model_decoder_layer_block # Decoder Normalization snake_case_ = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] snake_case_ = txa_decoder_norm # Only for layer 0: snake_case_ = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T snake_case_ = tax_decoder_rel_embedding # Token Embeddings snake_case_ = tax_model["""target"""]["""token_embedder"""]["""embedding"""] snake_case_ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: snake_case_ = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(lowercase_ ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.''' ) 
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''') parser.add_argument( '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.''' ) lowerCamelCase_ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
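# Standalone sketch of the nested-dictionary walk the converter above performs on
# the loaded T5X checkpoint; the toy checkpoint tree is fabricated for illustration.
import numpy as np

ckpt = {"target": {"encoder": {"layers_0": {"attention": {"key": {"kernel": np.zeros((4, 4))}}}}}}


def get_by_path(tree, path):
    for part in path:  # one dict level per path component
        tree = tree[part]
    return tree


kernel = get_by_path(ckpt, ["target", "encoder", "layers_0", "attention", "key", "kernel"])
print(kernel.shape)  # (4, 4)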
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
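# Demonstration of the derived num_hidden_layers property defined above; the layer
# counts are arbitrary.
config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
print(config.num_hidden_layers)  # 12: encoder plus decoder layers
try:
    config.num_hidden_layers = 8
except NotImplementedError as err:
    print(err)  # the setter deliberately refuses direct assignment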
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
def get_set_bits_count(number: int) -> int:
    """Return the number of set bits (1s) in the binary representation of a non-negative integer.

    >>> get_set_bits_count(25)
    3
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
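# Illustrative calls with arbitrary values: 25 == 0b11001 has three set bits, and a
# negative input trips the guard above.
assert get_set_bits_count(25) == 3
assert get_set_bits_count(0) == 0
try:
    get_set_bits_count(-1)
except ValueError:
    pass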
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Optional[int] = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Any=False ): """simple docstring""" UpperCamelCase__ : str = '''backbone.''' if is_semantic else '''''' UpperCamelCase__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ (F"{prefix}cls_token", '''beit.embeddings.cls_token'''), (F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''), (F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''), (F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : int=False ): """simple docstring""" for i in range(config.num_hidden_layers ): UpperCamelCase__ : Union[str, Any] = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCamelCase__ : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" ) UpperCamelCase__ : List[str] = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" ) UpperCamelCase__ : Tuple = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" ) UpperCamelCase__ : List[str] = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ : Optional[int] = q_bias UpperCamelCase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] 
UpperCamelCase__ : Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCamelCase__ : List[Any] = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" ) UpperCamelCase__ : List[str] = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" ) UpperCamelCase__ : Any = gamma_a UpperCamelCase__ : str = gamma_a def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" UpperCamelCase__ : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Union[str, Any] = val def _a ( ): """simple docstring""" UpperCamelCase__ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase__ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=False ): """simple docstring""" UpperCamelCase__ : Optional[Any] = False if '''rvlcdip''' in checkpoint_url else True UpperCamelCase__ : str = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE , use_mask_token=SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCamelCase__ : List[str] = 1024 UpperCamelCase__ : Union[str, Any] = 4096 UpperCamelCase__ : Optional[int] = 24 UpperCamelCase__ : List[str] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCamelCase__ : Any = 16 UpperCamelCase__ : Optional[int] = '''huggingface/label-files''' UpperCamelCase__ : Union[str, Any] = '''rvlcdip-id2label.json''' UpperCamelCase__ : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase__ : Optional[Any] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} UpperCamelCase__ : int = idalabel UpperCamelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCamelCase__ : Optional[int] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model'''] UpperCamelCase__ : str = create_rename_keys(SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE ) # load HuggingFace model UpperCamelCase__ : Tuple = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE ) # Check outputs on an image UpperCamelCase__ : List[str] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = prepare_img() UpperCamelCase__ : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) UpperCamelCase__ : Union[str, Any] = encoding['''pixel_values'''] UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = outputs.logits # verify logits UpperCamelCase__ : Dict = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(SCREAMING_SNAKE_CASE 
).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: UpperCamelCase__ : Any = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCamelCase__ : Optional[Any] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __UpperCamelCase : Dict = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
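# Standalone sketch of the fused-QKV split read_in_q_k_v above performs: BEiT-style
# checkpoints store one stacked (3 * hidden, hidden) matrix that is sliced into
# query, key and value thirds. The hidden size here is arbitrary.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # stacked [q; k; v]

q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)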
'''simple docstring''' import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def a_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[int]=None , __snake_case : List[str]=None , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : List[str]=None , ) -> int: """simple docstring""" if attention_mask is None: lowerCamelCase_ =input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCamelCase_ =decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCamelCase_ =torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__snake_case ) if decoder_head_mask is None: lowerCamelCase_ =torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__snake_case ) if cross_attn_head_mask is None: lowerCamelCase_ =torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__snake_case ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=99, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase="relu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=20, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=0, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =eos_token_id lowerCamelCase_ =pad_token_id lowerCamelCase_ =bos_token_id def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =self.eos_token_id # Eos Token lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which 
results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCamelCase_ =input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase_ =decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCamelCase_ =self.get_config() lowerCamelCase_ =prepare_mam_aaa_inputs_dict(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return config, inputs_dict def lowercase__ ( self ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.prepare_config_and_inputs() return config, inputs_dict def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =MaMaaaModel(config=lowerCAmelCase ).get_decoder().to(lowerCAmelCase ).eval() lowerCamelCase_ =inputs_dict['''input_ids'''] lowerCamelCase_ =inputs_dict['''attention_mask'''] lowerCamelCase_ =inputs_dict['''head_mask'''] # first forward pass lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, head_mask=lowerCAmelCase, use_cache=lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ =ids_tensor((self.batch_size, 3), config.vocab_size ) lowerCamelCase_ =ids_tensor((self.batch_size, 3), 2 ) # append to next input_ids and lowerCamelCase_ =torch.cat([input_ids, next_tokens], dim=-1 ) lowerCamelCase_ =torch.cat([attention_mask, next_attn_mask], dim=-1 ) lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase )['''last_hidden_state'''] lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, past_key_values=lowerCAmelCase )[ '''last_hidden_state''' ] # select random slice lowerCamelCase_ =ids_tensor((1,), output_from_past.shape[-1] ).item() lowerCamelCase_ =output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-2 ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =MaMaaaModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval() lowerCamelCase_ =model(**lowerCAmelCase ) lowerCamelCase_ =outputs.encoder_last_hidden_state lowerCamelCase_ =outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase_ =model.get_encoder() encoder.save_pretrained(lowerCAmelCase ) lowerCamelCase_ =MaMaaaEncoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase ) lowerCamelCase_ =encoder(inputs_dict['''input_ids'''], attention_mask=inputs_dict['''attention_mask'''] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as 
tmpdirname: lowerCamelCase_ =model.get_decoder() decoder.save_pretrained(lowerCAmelCase ) lowerCamelCase_ =MaMaaaDecoder.from_pretrained(lowerCAmelCase ).to(lowerCAmelCase ) lowerCamelCase_ =decoder( input_ids=inputs_dict['''decoder_input_ids'''], attention_mask=inputs_dict['''decoder_attention_mask'''], encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=inputs_dict['''attention_mask'''], )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowercase : Dict =(MaMaaaForConditionalGeneration,) if is_torch_available() else () lowercase : Optional[int] =( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowercase : Dict =True lowercase : Tuple =True lowercase : Optional[Any] =False lowercase : int =False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =MaMaaaModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =model_class.from_pretrained(lowerCAmelCase, output_loading_info=lowerCAmelCase ) self.assertEqual(info['''missing_keys'''], [] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): lowerCamelCase_ =model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =copy.deepcopy(self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) if not self.is_encoder_decoder: lowerCamelCase_ =inputs['''input_ids'''] del inputs["input_ids"] else: lowerCamelCase_ =inputs['''input_ids'''] lowerCamelCase_ =inputs.get('''decoder_input_ids''', lowerCAmelCase ) del inputs["input_ids"] inputs.pop('''decoder_input_ids''', lowerCAmelCase ) lowerCamelCase_ =model.get_input_embeddings() if not self.is_encoder_decoder: lowerCamelCase_ =wte(lowerCAmelCase ) else: lowerCamelCase_ =wte(lowerCAmelCase ) 
lowerCamelCase_ =wte(lowerCAmelCase ) with torch.no_grad(): model(**lowerCAmelCase )[0] def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() lowerCamelCase_ =input_dict['''input_ids'''] lowerCamelCase_ =input_ids.ne(1 ).to(lowerCAmelCase ) lowerCamelCase_ =MaMaaaForConditionalGeneration(lowerCAmelCase ).eval().to(lowerCAmelCase ) if torch_device == "cuda": model.half() model.generate(lowerCAmelCase, attention_mask=lowerCAmelCase ) model.generate(num_beams=4, do_sample=lowerCAmelCase, early_stopping=lowerCAmelCase, num_return_sequences=3 ) def a_ ( __snake_case : Tuple ) -> List[Any]: """simple docstring""" return torch.tensor(__snake_case , dtype=torch.long , device=__snake_case ) a_ : int = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __UpperCamelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self ): """simple docstring""" return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase ) lowerCamelCase_ =_long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) lowerCamelCase_ =_long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) lowerCamelCase_ =prepare_mam_aaa_inputs_dict(model.config, lowerCAmelCase, lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(**lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape, lowerCAmelCase ) # change to expected output here lowerCamelCase_ =torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]], device=lowerCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=lowerCAmelCase ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase ) # change to intended input lowerCamelCase_ =_long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) lowerCamelCase_ =_long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) lowerCamelCase_ =prepare_mam_aaa_inputs_dict(model.config, lowerCAmelCase, lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(**lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape, lowerCAmelCase ) # change to expected output here lowerCamelCase_ =torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]], device=lowerCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=lowerCAmelCase ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowerCAmelCase ) lowerCamelCase_ =MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''', src_lang='''fr''', tgt_lang='''en''' ) lowerCamelCase_ =[ '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent''' ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de''' 
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''', ] # The below article tests that we don't add any hypotheses outside of the top n_beams lowerCamelCase_ =tokenizer(lowerCAmelCase, padding=lowerCAmelCase, return_tensors='''pt''' ) lowerCamelCase_ =model.generate( input_ids=dct['''input_ids'''].to(lowerCAmelCase ), attention_mask=dct['''attention_mask'''].to(lowerCAmelCase ), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id('''en''' ), ) lowerCamelCase_ =[ '''The NSA case highlights the total absence of intelligence debate''', '''I think there are two levels of response from the French government.''', '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.''' ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all''' ''' communications in France.''', ] lowerCamelCase_ =tokenizer.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) assert generated == expected_en
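
# A minimal, hedged sketch of the translation flow that the slow integration test
# above exercises, written against the public (non-obfuscated) class names of the
# same checkpoint; it assumes network access to download facebook/m2m100_418M.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
batch = tokenizer(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"], return_tensors="pt")
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))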
75
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class A_ : '''simple docstring''' pass
61
0
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : List[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Dict = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: lowerCAmelCase_ :Tuple = 1_2_8 elif "12-12" in model_name: lowerCAmelCase_ :Optional[Any] = 1_2 lowerCAmelCase_ :Dict = 1_2 elif "14-14" in model_name: lowerCAmelCase_ :Any = 1_4 lowerCAmelCase_ :int = 1_4 elif "16-16" in model_name: lowerCAmelCase_ :Optional[int] = 1_6 lowerCAmelCase_ :str = 1_6 else: raise ValueError("""Model not supported""" ) lowerCAmelCase_ :Optional[Any] = """huggingface/label-files""" if "speech-commands" in model_name: lowerCAmelCase_ :Any = 3_5 lowerCAmelCase_ :str = """speech-commands-v2-id2label.json""" else: lowerCAmelCase_ :Any = 5_2_7 lowerCAmelCase_ :List[Any] = """audioset-id2label.json""" lowerCAmelCase_ :Tuple = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase_ :int = {int(lowercase__ ): v for k, v in idalabel.items()} lowerCAmelCase_ :List[str] = idalabel lowerCAmelCase_ :str = {v: k for k, v in idalabel.items()} return config def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' if "module.v" in name: lowerCAmelCase_ :Optional[int] = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: lowerCAmelCase_ :Tuple = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: lowerCAmelCase_ :Tuple = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: lowerCAmelCase_ :Union[str, Any] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: lowerCAmelCase_ :List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: lowerCAmelCase_ :List[str] = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: lowerCAmelCase_ :Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowerCAmelCase_ :str = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowerCAmelCase_ :str = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowerCAmelCase_ :Any = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowerCAmelCase_ :List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCAmelCase_ :Tuple = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: lowerCAmelCase_ :Any = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def _snake_case ( lowercase__ : List[str] , lowercase__ : Dict ) -> List[str]: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase_ 
:Dict = orig_state_dict.pop(lowercase__ ) if "qkv" in key: lowerCAmelCase_ :Union[str, Any] = key.split(""".""" ) lowerCAmelCase_ :Union[str, Any] = int(key_split[3] ) lowerCAmelCase_ :Optional[int] = config.hidden_size if "weight" in key: lowerCAmelCase_ :Optional[Any] = val[:dim, :] lowerCAmelCase_ :Optional[Any] = val[dim : dim * 2, :] lowerCAmelCase_ :str = val[-dim:, :] else: lowerCAmelCase_ :str = val[:dim] lowerCAmelCase_ :str = val[dim : dim * 2] lowerCAmelCase_ :Tuple = val[-dim:] else: lowerCAmelCase_ :Any = val return orig_state_dict def _snake_case ( lowercase__ : Tuple ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ :Any = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) @torch.no_grad() def _snake_case ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : int=False ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = get_audio_spectrogram_transformer_config(lowercase__ ) lowerCAmelCase_ :str = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict lowerCAmelCase_ :List[Any] = model_name_to_url[model_name] lowerCAmelCase_ :int = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" ) # remove some keys remove_keys(lowercase__ ) # rename some keys lowerCAmelCase_ :List[Any] = convert_state_dict(lowercase__ , lowercase__ ) # load 🤗 model lowerCAmelCase_ :int = ASTForAudioClassification(lowercase__ ) model.eval() model.load_state_dict(lowercase__ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 lowerCAmelCase_ :Dict = -4.2677393 if """speech-commands""" not in model_name else -6.845978 lowerCAmelCase_ :Union[str, Any] = 4.5689974 if """speech-commands""" not in model_name else 5.5654526 lowerCAmelCase_ :Optional[Any] = 1_0_2_4 if """speech-commands""" not in model_name else 1_2_8 lowerCAmelCase_ :str = ASTFeatureExtractor(mean=lowercase__ , std=lowercase__ , max_length=lowercase__ ) if "speech-commands" in model_name: lowerCAmelCase_ :Any = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) lowerCAmelCase_ :List[str] = dataset[0]["""audio"""]["""array"""] else: lowerCAmelCase_ :Any = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) lowerCAmelCase_ , lowerCAmelCase_ :Tuple = torchaudio.load(lowercase__ ) 
lowerCAmelCase_ :Union[str, Any] = waveform.squeeze().numpy() lowerCAmelCase_ :List[str] = feature_extractor(lowercase__ , sampling_rate=1_6_0_0_0 , return_tensors="""pt""" ) # forward pass lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": lowerCAmelCase_ :Dict = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": lowerCAmelCase_ :Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": lowerCAmelCase_ :Optional[int] = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": lowerCAmelCase_ :Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": lowerCAmelCase_ :List[Any] = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": lowerCAmelCase_ :List[str] = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": lowerCAmelCase_ :int = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": lowerCAmelCase_ :Any = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ): raise ValueError("""Logits don't match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase__ ) print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(lowercase__ ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(f"""MIT/{model_name}""" ) feature_extractor.push_to_hub(f"""MIT/{model_name}""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __UpperCAmelCase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
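
# Standalone sketch of the fused-QKV split that convert_state_dict performs above,
# shown on a toy tensor; `dim` stands in for config.hidden_size (toy value here).
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]           # query rows
k = qkv_weight[dim : dim * 2, :]  # key rows
v = qkv_weight[-dim:, :]          # value rows
assert torch.equal(torch.cat([q, k, v]), qkv_weight)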
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
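
# A small usage sketch, assuming the class above is the public DetrConfig from
# transformers (its model_type is "detr"); the kwargs below are illustrative.
from transformers import DetrConfig

config = DetrConfig(num_queries=50, d_model=128)
assert config.hidden_size == 128  # attribute_map: hidden_size -> d_model
assert config.num_attention_heads == config.encoder_attention_heads
assert config.to_dict()["model_type"] == "detr"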
1
1
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent a__ : Optional[Any] = {"UserAgent": UserAgent().random} def snake_case ( UpperCAmelCase )-> dict: """simple docstring""" __A = script.contents[0] __A = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : def __init__( self :Optional[Any] , _A :Optional[Any] ) -> Optional[Any]: '''simple docstring''' __A = F'https://www.instagram.com/{username}/' __A = self.get_json() def lowercase_ ( self :Union[str, Any] ) -> dict: '''simple docstring''' __A = requests.get(self.url , headers=_A ).text __A = BeautifulSoup(_A , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self :Union[str, Any] ) -> str: '''simple docstring''' return F'{self.__class__.__name__}(\'{self.username}\')' def __str__( self :List[Any] ) -> str: '''simple docstring''' return F'{self.fullname} ({self.username}) is {self.biography}' @property def lowercase_ ( self :Optional[Any] ) -> str: '''simple docstring''' return self.user_data["username"] @property def lowercase_ ( self :str ) -> str: '''simple docstring''' return self.user_data["full_name"] @property def lowercase_ ( self :Union[str, Any] ) -> str: '''simple docstring''' return self.user_data["biography"] @property def lowercase_ ( self :str ) -> str: '''simple docstring''' return self.user_data["business_email"] @property def lowercase_ ( self :Tuple ) -> str: '''simple docstring''' return self.user_data["external_url"] @property def lowercase_ ( self :int ) -> int: '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowercase_ ( self :List[Any] ) -> int: '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowercase_ ( self :Tuple ) -> int: '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowercase_ ( self :Tuple ) -> str: '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowercase_ ( self :Dict ) -> bool: '''simple docstring''' return self.user_data["is_verified"] @property def lowercase_ ( self :Union[str, Any] ) -> bool: '''simple docstring''' return self.user_data["is_private"] def snake_case ( UpperCAmelCase = "github" )-> None: """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions __A = InstagramUser(UpperCAmelCase ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() a__ : List[str] = InstagramUser("github") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
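
# Offline sketch of the scraping pattern used above: locate the <script> tag and
# slice out the embedded JSON blob. The HTML below is a made-up stand-in page.
import json
from bs4 import BeautifulSoup

html = (
    '<script>window._sharedData = {"config": {}, "entry_data": '
    '{"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};</script>'
)
script = BeautifulSoup(html, "html.parser").find("script")
data = script.contents[0]
info = json.loads(data[data.find('{"config"') : -1])
print(info["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"])  # github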
161
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a__ : Dict = logging.getLogger(__name__) def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Optional[int]: """simple docstring""" return (preds == labels).mean() @dataclass class UpperCamelCase__ : UpperCAmelCase__ : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) UpperCAmelCase__ : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) UpperCAmelCase__ : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) UpperCAmelCase__ : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class UpperCamelCase__ : UpperCAmelCase__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())}) UpperCAmelCase__ : str = field(metadata={'help': 'Should contain the data files for the task.'}) UpperCAmelCase__ : int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) UpperCAmelCase__ : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'}) def snake_case ( )-> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __A , __A , __A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: __A = processors[data_args.task_name]() __A = processor.get_labels() __A = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __A = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets __A = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __A = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: __A = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator __A = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __A = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __A = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __A = trainer.evaluate() __A = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , 
UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def snake_case ( UpperCAmelCase )-> List[str]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
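
# Minimal sketch of the HfArgumentParser-over-dataclasses pattern the script above
# is built on; ToyArguments and its fields are hypothetical, for illustration only.
from dataclasses import dataclass, field

from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    model_name_or_path: str = field(default="bert-base-uncased")
    max_seq_length: int = field(default=128)

(toy_args,) = HfArgumentParser(ToyArguments).parse_args_into_dataclasses(args=["--max_seq_length", "64"])
print(toy_args.model_name_or_path, toy_args.max_seq_length)  # bert-base-uncased 64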
161
1
"""simple docstring""" from math import factorial SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def __UpperCAmelCase ( snake_case_ : int ) -> int: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(snake_case_ ) ) def __UpperCAmelCase ( snake_case_ : int = 60 , snake_case_ : int = 1000000 ) -> int: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not isinstance(snake_case_ , snake_case_ ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length _lowerCAmelCase = 0 # the cached sizes of the previous chains _lowerCAmelCase = {} for start_chain_element in range(1 , snake_case_ ): # The temporary set will contain the elements of the chain _lowerCAmelCase = set() _lowerCAmelCase = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. _lowerCAmelCase = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(snake_case_ ) chain_set_length += 1 _lowerCAmelCase = digit_factorial_sum(snake_case_ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] _lowerCAmelCase = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
317
"""simple docstring""" from __future__ import annotations import queue class __lowerCamelCase : def __init__(self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = data _lowerCAmelCase = None _lowerCAmelCase = None def __UpperCAmelCase ( ) -> TreeNode: """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase = input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase = queue.Queue() _lowerCAmelCase = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() _lowerCAmelCase = F"""Enter the left node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = left_node q.put(snake_case_ ) _lowerCAmelCase = F"""Enter the right node of {node_found.data}: """ _lowerCAmelCase = input(snake_case_ ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase = TreeNode(int(snake_case_ ) ) _lowerCAmelCase = right_node q.put(snake_case_ ) raise def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = queue.Queue() q.put(snake_case_ ) while not q.empty(): _lowerCAmelCase = [] while not q.empty(): _lowerCAmelCase = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(snake_case_ ) _lowerCAmelCase = n.left # end of while means current node doesn't have left child _lowerCAmelCase = stack.pop() # start to traverse its right child _lowerCAmelCase = n.right def __UpperCAmelCase ( snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase = [] _lowerCAmelCase = node while n or stack: while n: stack.append(snake_case_ ) _lowerCAmelCase = n.left _lowerCAmelCase = stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase = n.right def __UpperCAmelCase ( 
snake_case_ : TreeNode ) -> None: """simple docstring""" if not isinstance(snake_case_ , snake_case_ ) or not node: return _lowerCAmelCase , _lowerCAmelCase = [], [] _lowerCAmelCase = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def __UpperCAmelCase ( snake_case_ : str = "" , snake_case_ : int=50 , snake_case_ : Dict="*" ) -> str: """simple docstring""" if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase = divmod(width - len(snake_case_ ) - 2 , 2 ) return F"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('''Binary Tree Traversals''')) SCREAMING_SNAKE_CASE : TreeNode = build_tree() print(prompt('''Pre Order Traversal''')) pre_order(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal''')) in_order(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal''')) post_order(node) print(prompt() + '''\n''') print(prompt('''Level Order Traversal''')) level_order(node) print(prompt() + '''\n''') print(prompt('''Actual Level Order Traversal''')) level_order_actual(node) print('''*''' * 5_0 + '''\n''') print(prompt('''Pre Order Traversal - Iteration Version''')) pre_order_iter(node) print(prompt() + '''\n''') print(prompt('''In Order Traversal - Iteration Version''')) in_order_iter(node) print(prompt() + '''\n''') print(prompt('''Post Order Traversal - Iteration Version''')) post_order_iter(node) print(prompt())
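
# Self-contained sketch of the stack-based in-order traversal used above, run on a
# hand-built three-node tree so the interactive build_tree() prompt is not needed.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def in_order_iterative(node):
    stack, out = [], []
    while node or stack:
        while node:  # walk to the leftmost child, remembering the path
            stack.append(node)
            node = node.left
        node = stack.pop()  # visit the node, then continue in its right subtree
        out.append(node.data)
        node = node.right
    return out

print(in_order_iterative(Node(1, Node(2), Node(3))))  # [2, 1, 3]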
317
1
def actual_power(a: int, b: int) -> int:
    # Exponentiation by squaring: O(log b) multiplications for b >= 0.
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half-power once instead of twice
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    # Negative exponents: a**b == 1 / a**(-b); delegate the positive case directly.
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
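
# Spot checks for the functions above; power(-2, -3) is 1 / (-2)**3 == -0.125.
assert power(2, 10) == 1024
assert power(3, 0) == 1
assert power(-2, -3) == -0.125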
195
from manim import * class A_ ( __lowerCamelCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): lowercase = Rectangle(height=0.5 , width=0.5 ) lowercase = Rectangle(height=0.25 , width=0.25 ) lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowercase = [mem.copy() for i in range(6 )] lowercase = [mem.copy() for i in range(6 )] lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 ) lowercase = Text('CPU' , font_size=24 ) lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case ) lowercase = [mem.copy() for i in range(4 )] lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = Text('GPU' , font_size=24 ) lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case ) gpu.move_to([-1, -1, 0] ) self.add(snake_case ) lowercase = [mem.copy() for i in range(6 )] lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = Text('Model' , font_size=24 ) lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case ) model.move_to([3, -1.0, 0] ) self.add(snake_case ) lowercase = [] lowercase = [] lowercase = [] for i, rect in enumerate(snake_case ): rect.set_stroke(snake_case ) lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=snake_case , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case , buff=0.0 ) self.add(snake_case ) model_cpu_arr.append(snake_case ) self.add(*snake_case , *snake_case , *snake_case ) lowercase = [mem.copy() for i in range(6 )] lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = Text('Loaded Checkpoint' , font_size=24 ) lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case ) checkpoint.move_to([3, 0.5, 0] ) self.add(snake_case ) lowercase = [] lowercase = [] for i, rect in enumerate(snake_case ): lowercase = fill.copy().set_fill(snake_case , opacity=0.7 ) target.move_to(snake_case ) ckpt_arr.append(snake_case ) lowercase = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(snake_case ) self.add(*snake_case , *snake_case ) lowercase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowercase = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case , snake_case ) lowercase = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(snake_case ) lowercase = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowercase = [meta_mem.copy() for i in range(6 )] lowercase = [meta_mem.copy() for i in range(6 )] lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 ) lowercase = VGroup(*snake_case 
).arrange(snake_case , buff=0 ) lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 ) lowercase = Text('Disk' , font_size=24 ) lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(snake_case , run_time=3 ) , Write(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) ) lowercase = [] for i, rect in enumerate(snake_case ): lowercase = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(snake_case , run_time=1.5 ) ) self.play(*snake_case ) self.play(FadeOut(snake_case ) ) lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case , run_time=3 ) ) self.play( FadeOut(snake_case , snake_case , *snake_case , *snake_case ) , ) self.wait()
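
# A minimal Manim scene sketching the building blocks the animation above composes
# (assumes the community edition; render with `manim -ql <file>.py MemoryBlocks`).
from manim import RIGHT, Create, Rectangle, Scene, Text, VGroup, Write

class MemoryBlocks(Scene):
    def construct(self):
        blocks = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(4)]).arrange(RIGHT, buff=0)
        label = Text("GPU", font_size=24).next_to(blocks, RIGHT)
        self.play(Create(blocks), Write(label))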
195
1
"""simple docstring""" __UpperCamelCase = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
353
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any: # Load configuration defined in the metadata file with open(SCREAMING_SNAKE_CASE_ ) as metadata_file: SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata['model_config'] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['module'] # Load the entity vocab file SCREAMING_SNAKE_CASE = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'r' ) as f: SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = 'MLukeTokenizer' with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['@'] )[0] SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['#'] )[0] SCREAMING_SNAKE_CASE = state_dict['embeddings.word_embeddings.weight'] SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE = state_dict[bias_name] SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.' 
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE = state_dict['entity_embeddings.entity_embeddings.weight'] SCREAMING_SNAKE_CASE = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 ) SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE = state_dict['entity_predictions.bias'] SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 ) SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval() state_dict.pop('entity_predictions.decoder.weight' ) state_dict.pop('lm_head.decoder.weight' ) state_dict.pop('lm_head.decoder.bias' ) SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )): SCREAMING_SNAKE_CASE = state_dict[key] else: SCREAMING_SNAKE_CASE = state_dict[key] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}: raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' ) if set(SCREAMING_SNAKE_CASE_ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task='entity_classification' ) SCREAMING_SNAKE_CASE = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).' 
SCREAMING_SNAKE_CASE = (0, 9) SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' ) SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE = torch.Size((1, 33, 7_68) ) SCREAMING_SNAKE_CASE = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) ) SCREAMING_SNAKE_CASE = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = 'Tokyo is the capital of <mask>.' SCREAMING_SNAKE_CASE = (24, 30) SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' ) SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = encoding['input_ids'][0].tolist() SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) ) SCREAMING_SNAKE_CASE = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE_ ) ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int: SCREAMING_SNAKE_CASE = ['[MASK]', '[PAD]', '[UNK]'] SCREAMING_SNAKE_CASE = [json.loads(SCREAMING_SNAKE_CASE_ ) for line in open(SCREAMING_SNAKE_CASE_ )] SCREAMING_SNAKE_CASE = {} for entry in data: SCREAMING_SNAKE_CASE = entry['id'] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE = entity_id break SCREAMING_SNAKE_CASE = F'{language}:{entity_name}' SCREAMING_SNAKE_CASE = entity_id return new_mapping if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( 
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) __UpperCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
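
# Standalone sketch of the embedding-extension trick the conversion above uses:
# seed rows for the two new entity tokens from existing token rows, then concat.
# The sizes and indices below are toy values, not the real checkpoint's.
import torch

word_emb = torch.randn(10, 4)           # stands in for embeddings.word_embeddings.weight
ent_init_index, enta_init_index = 3, 7  # ids of "@" and "#" in the real script
ent_emb = word_emb[ent_init_index].unsqueeze(0)
enta_emb = word_emb[enta_init_index].unsqueeze(0)
extended = torch.cat([word_emb, ent_emb, enta_emb])
assert extended.shape == (12, 4)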
38
0
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
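# Minimal sketch of the denoising-loop pattern all of the tests above
# exercise, assuming `diffusers` and `torchsde` are installed. The zeros
# tensor is a stand-in for a trained UNet's noise prediction, not a
# meaningful model; replace it with `unet(model_input, t).sample` in real use.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    noise_sampler_seed=0,
)
scheduler.set_timesteps(10)

# Start from scaled Gaussian noise, exactly like `dummy_sample_deter * init_noise_sigma`.
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet output
    sample = scheduler.step(noise_pred, t, sample).prev_sample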
267
import numpy as np


def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure proper dimensionality (square matrix, matching vector length).
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know the vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation. Get eigenvalues and eigenvectors using
        # built-in numpy eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
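# The loop above is classical power iteration with a Rayleigh-quotient
# eigenvalue estimate:
#
#     v_{k+1} = A v_k / ||A v_k||,        lambda_k = v_k^* A v_k
#
# For the Hermitian (or real symmetric) matrices this module accepts, the
# iterates converge to the dominant eigenpair, at a rate governed by the
# ratio |lambda_2 / lambda_1| of the two largest-magnitude eigenvalues.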
267
1
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
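# Because the function is exposed through fire.Fire, it doubles as a command
# line tool; a hypothetical invocation (the script and file names below are
# placeholders, not part of the module above):
#
#     python rouge_cli.py predictions.txt references.txt --save_path rouge.json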
351
from math import factorial, pi


def maclaurin_sin(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta to the range [0, 2*pi) before summing the series.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
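# The two helpers implement the Maclaurin (Taylor-at-zero) expansions,
# truncated after `accuracy` terms:
#
#     sin(theta) = sum_{r>=0} (-1)^r * theta^(2r+1) / (2r+1)!
#     cos(theta) = sum_{r>=0} (-1)^r * theta^(2r)   / (2r)!
#
# Reducing theta modulo 2*pi first keeps the truncated series accurate for
# large inputs, since convergence slows as |theta| grows.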
96
0
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : str = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def lowerCAmelCase_ ( snake_case_,snake_case_ ): for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) _A : Tuple = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) _A : Tuple = in_proj_weight[ : encoder_config.hidden_size, : ] _A : Optional[int] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] _A : Dict = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : List[Any] = dct.pop(snake_case_ ) _A : Dict = val def lowerCAmelCase_ ( snake_case_ ): if "handwritten" in checkpoint_url: _A : Union[str, Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: _A : Dict = 
"""https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" _A : Union[str, Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Any = ViTConfig(image_size=384,qkv_bias=snake_case_ ) _A : Tuple = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: _A : int = 768 elif "large" in checkpoint_url: # use ViT-large encoder _A : int = 1024 _A : Tuple = 4096 _A : Union[str, Any] = 24 _A : str = 16 _A : int = 1024 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: _A : Optional[Any] = False _A : Union[str, Any] = """relu""" _A : int = 1024 _A : Union[str, Any] = True _A : Union[str, Any] = False _A : Optional[Any] = False # load HuggingFace model _A : List[str] = ViTModel(snake_case_,add_pooling_layer=snake_case_ ) _A : List[str] = TrOCRForCausalLM(snake_case_ ) _A : Any = VisionEncoderDecoderModel(encoder=snake_case_,decoder=snake_case_ ) model.eval() # load state_dict of original model, rename some keys _A : Dict = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""",check_hash=snake_case_ )["""model"""] _A : Optional[int] = create_rename_keys(snake_case_,snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_,snake_case_,snake_case_ ) read_in_q_k_v(snake_case_,snake_case_ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): _A : List[str] = state_dict.pop(snake_case_ ) if key.startswith("""decoder""" ) and "output_projection" not in key: _A : Optional[int] = val else: _A : Dict = val # load state dict model.load_state_dict(snake_case_ ) # Check outputs on an image _A : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) _A : List[str] = RobertaTokenizer.from_pretrained("""roberta-large""" ) _A : Dict = TrOCRProcessor(snake_case_,snake_case_ ) _A : Tuple = processor(images=prepare_img(snake_case_ ),return_tensors="""pt""" ).pixel_values # verify logits _A : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) _A : Optional[int] = model(pixel_values=snake_case_,decoder_input_ids=snake_case_ ) _A : int = outputs.logits _A : Optional[int] = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: _A : Dict = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: _A : Tuple = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: _A : int = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: _A : Optional[int] = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10],snake_case_,atol=1e-3 ), "First 
elements of logits not as expected" Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case_ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) _snake_case = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
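# A hedged usage sketch for a converted checkpoint (not part of the script
# above); "microsoft/trocr-base-handwritten" is the published hub id of this
# conversion, and the image URL is the same handwriting sample the script uses.
import requests
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])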
26
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> Optional[int]: super().__init__(_a ) _A : Union[str, Any] = RobertaEmbeddings(_a ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,) class lowercase ( UpperCamelCase__ ): _a = RobertaConfig _a = "roberta" def __init__( self , _a ) -> str: super().__init__(_a ) _A : Any = config.num_labels _A : Dict = config.num_hidden_layers _A : List[str] = DeeRobertaModel(_a ) _A : int = nn.Dropout(config.hidden_dropout_prob ) _A : int = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_a ) def a__ ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=-1 , _a=False , ) -> Any: _A : Optional[int] = self.num_layers try: _A : List[str] = self.roberta( _a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , ) _A : List[str] = outputs[1] _A : List[str] = self.dropout(_a ) _A : Optional[Any] = self.classifier(_a ) _A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _A : List[Any] = e.message _A : Optional[int] = e.exit_layer _A : Optional[int] = outputs[0] if not self.training: _A : int = entropy(_a ) _A : int = [] _A : int = [] if labels is not None: if self.num_labels == 1: # We are doing regression _A : Union[str, Any] = MSELoss() _A : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _A : Optional[Any] = [] for highway_exit in outputs[-1]: _A : Tuple = highway_exit[0] if not self.training: highway_logits_all.append(_a ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _A : List[str] = MSELoss() _A : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _A : List[Any] = CrossEntropyLoss() _A : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_a ) if train_highway: _A : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _A : int = (loss,) + outputs if not self.training: _A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _A : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
26
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __A : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __A : int = 25_0004 __A : Tuple = 25_0020 @require_sentencepiece @require_tokenizers class __A ( lowerCAmelCase , unittest.TestCase ): lowerCAmelCase_ : List[str] = MBartTokenizer lowerCAmelCase_ : List[Any] = MBartTokenizerFast lowerCAmelCase_ : int = True lowerCAmelCase_ : Tuple = True def lowercase__ ( self : int ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : Any = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : List[Any] ): lowerCAmelCase : Dict = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) lowerCAmelCase : List[str] = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def lowercase__ ( self : Tuple ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase : str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCAmelCase : int = tempfile.mkdtemp() lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) 
lowerCAmelCase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase : List[Any] = tempfile.mkdtemp() lowerCAmelCase : int = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) lowerCAmelCase : Dict = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ ) lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase : Optional[int] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): lowerCAmelCase_ : str = "facebook/mbart-large-en-ro" lowerCAmelCase_ : List[Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowerCAmelCase_ : Any = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowerCAmelCase_ : List[Any] = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def lowercase__ ( cls : int ): lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) lowerCAmelCase : int = 1 return cls def lowercase__ ( self : str ): 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 ) def lowercase__ ( self : str ): lowerCAmelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) def lowercase__ ( self : int ): self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids ) lowerCAmelCase : Optional[int] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowerCAmelCase : List[str] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ ) def lowercase__ ( self : List[str] ): lowerCAmelCase : int = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , UpperCAmelCase_ ) lowerCAmelCase : Any = 10 lowerCAmelCase : str = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) def lowercase__ ( self : Optional[Any] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] ) def lowercase__ ( self : Optional[Any] ): lowerCAmelCase : Any = tempfile.mkdtemp() lowerCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ ) @require_torch def lowercase__ ( self : Optional[int] ): lowerCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt' ) lowerCAmelCase : str = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowercase__ ( self : Any ): lowerCAmelCase : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowerCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' ) lowerCAmelCase : Any = 
self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' ) lowerCAmelCase : List[Any] = targets['input_ids'] lowerCAmelCase : List[str] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase__ ( self : str ): lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) , { # A, test, EOS, en_XX 'input_ids': [[62, 3034, 2, 250004]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 250001, } , )
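# A hedged sketch of the translation-tokenization pattern these tests cover
# (downloads the same hub checkpoint the test class uses):
from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)
# input_ids end with [eos, en_XX] and labels end with [eos, ro_RO], matching
# the prefix_tokens / suffix_tokens assertions above.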
366
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __A : List[Any] = logging.get_logger(__name__) __A : List[Any] = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __A ( lowerCAmelCase ): lowerCAmelCase_ : Union[str, Any] = "deberta-v2" def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase : Dict = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : str = num_attention_heads lowerCAmelCase : List[str] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : Any = max_position_embeddings lowerCAmelCase : str = type_vocab_size lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : Union[str, Any] = relative_attention lowerCAmelCase : List[Any] = max_relative_positions lowerCAmelCase : List[Any] = pad_token_id lowerCAmelCase : Optional[Any] = position_biased_input # Backwards compatibility if type(UpperCAmelCase_ ) == str: lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )] lowerCAmelCase : str = pos_att_type lowerCAmelCase : Dict = vocab_size lowerCAmelCase : Optional[Any] = layer_norm_eps lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ ) lowerCAmelCase : Tuple = pooler_dropout lowerCAmelCase : Union[str, Any] = pooler_hidden_act class __A ( lowerCAmelCase ): @property def lowercase__ ( self : Optional[Any] ): if self.task == "multiple-choice": lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] ) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] ) @property def lowercase__ ( self : int ): return 12 def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : 
Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ): lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
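# The OnnxConfig subclass above is what the (now legacy) transformers.onnx
# export entry point consumes; a hedged CLI sketch, assuming that module is
# still available in the installed transformers version:
#
#     python -m transformers.onnx --model=microsoft/deberta-v2-xlarge onnx/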
323
0
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
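# Outside the synthetic fixture above, the published checkpoint can be loaded
# directly (requires network access to the Hugging Face hub):
from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
print(tokenizer.tokenize("Tôi là VinAI Research"))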
48
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() A__ : Optional[int] = logging.get_logger(__name__) A__ : List[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } A__ : Tuple = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ) -> Tuple: for attribute in key.split('.' ): __lowerCamelCase : List[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) if weight_type is not None: __lowerCamelCase : Any = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape else: __lowerCamelCase : Any = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": __lowerCamelCase : Tuple = value elif weight_type == "weight_g": __lowerCamelCase : Optional[int] = value elif weight_type == "weight_v": __lowerCamelCase : str = value elif weight_type == "bias": __lowerCamelCase : List[Any] = value else: __lowerCamelCase : List[str] = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ) -> Optional[int]: __lowerCamelCase : Optional[Any] = [] __lowerCamelCase : Dict = fairseq_model.state_dict() __lowerCamelCase : List[str] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCamelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowerCamelCase : int = True if "*" in mapped_key: __lowerCamelCase : Optional[int] = name.split(UpperCAmelCase_ )[0].split('.' )[-2] __lowerCamelCase : List[str] = mapped_key.replace('*' , UpperCAmelCase_ ) if "weight_g" in name: __lowerCamelCase : Dict = 'weight_g' elif "weight_v" in name: __lowerCamelCase : Any = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __lowerCamelCase : Any = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCamelCase : List[Any] = 'weight' else: __lowerCamelCase : str = None set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) continue if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(F'Unused weights: {unused_weights}' ) def UpperCAmelCase__ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Tuple: __lowerCamelCase : List[str] = full_name.split('conv_layers.' )[-1] __lowerCamelCase : List[Any] = name.split('.' ) __lowerCamelCase : Any = int(items[0] ) __lowerCamelCase : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) __lowerCamelCase : Union[str, Any] = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) __lowerCamelCase : Tuple = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) __lowerCamelCase : str = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) __lowerCamelCase : List[Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(UpperCAmelCase_ ) @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=None ) -> Optional[int]: # load the pre-trained checkpoints __lowerCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = WavLMConfigOrig(checkpoint['cfg'] ) __lowerCamelCase : Union[str, Any] = WavLMOrig(UpperCAmelCase_ ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __lowerCamelCase : Optional[int] = WavLMConfig.from_pretrained(UpperCAmelCase_ ) else: __lowerCamelCase : Any = WavLMConfig() __lowerCamelCase : Optional[int] = WavLMModel(UpperCAmelCase_ ) recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ ) hf_wavlm.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": A__ : List[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") A__ : List[str] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
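# Hedged sketch of using a converted checkpoint; "microsoft/wavlm-base" is the
# published hub id for the base conversion. The zero waveform is a dummy input.
import torch
from transformers import AutoFeatureExtractor, WavLMModel

feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base")
model = WavLMModel.from_pretrained("microsoft/wavlm-base")

waveform = torch.zeros(16000)  # one second of "audio" at 16 kHz
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (1, frames, hidden_size)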
185
0
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = OpenAIGPTTokenizer UpperCamelCase = OpenAIGPTTokenizerFast UpperCamelCase = True UpperCamelCase = False def a__ ( self : str ) -> Any: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase_ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] lowerCamelCase_ = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) lowerCamelCase_ = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', ''] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(_snake_case ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(_snake_case ) ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) lowerCamelCase_ = 'lower' lowerCamelCase_ = ['low', 'er</w>'] lowerCamelCase_ = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) lowerCamelCase_ = tokens + ['<unk>'] lowerCamelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def a__ ( self : int , A_ : List[Any]=15 ) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) # Simple input lowerCamelCase_ = 'This is a simple input' lowerCamelCase_ = ['This is a simple input 1', 'This is a simple input 2'] lowerCamelCase_ = ('This is a simple input', 'This is a pair') lowerCamelCase_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' ) # Simple input self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' ) # Simple input self.assertRaises( _snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , ) # Pair input self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' ) # Pair input self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' ) # Pair input self.assertRaises( _snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class A( UpperCamelCase ): '''simple docstring''' pass
357
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
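# Each torch model re-exported here can be instantiated straight from config
# arguments; a minimal sketch (the sizes below are illustrative only):
import torch
from diffusers import UNet2DModel

model = UNet2DModel(sample_size=64, in_channels=3, out_channels=3)
noisy = torch.randn(1, 3, 64, 64)
out = model(noisy, timestep=1).sample  # same shape as the input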
208
0
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } _UpperCAmelCase = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: int =EfficientNetConfig() SCREAMING_SNAKE_CASE_: Any =CONFIG_MAP[model_name]["""hidden_dim"""] SCREAMING_SNAKE_CASE_: Optional[int] =CONFIG_MAP[model_name]["""width_coef"""] SCREAMING_SNAKE_CASE_: List[str] =CONFIG_MAP[model_name]["""depth_coef"""] SCREAMING_SNAKE_CASE_: List[Any] =CONFIG_MAP[model_name]["""image_size"""] SCREAMING_SNAKE_CASE_: List[Any] =CONFIG_MAP[model_name]["""dropout_rate"""] SCREAMING_SNAKE_CASE_: Dict =CONFIG_MAP[model_name]["""dw_padding"""] SCREAMING_SNAKE_CASE_: List[str] ="""huggingface/label-files""" SCREAMING_SNAKE_CASE_: Optional[Any] ="""imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE_: List[str] =1000 SCREAMING_SNAKE_CASE_: Any =json.load(open(hf_hub_download(_a , _a , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE_: List[Any] ={int(_a ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_: Optional[Any] =idalabel SCREAMING_SNAKE_CASE_: Dict ={v: k for k, v in idalabel.items()} return config def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: Dict ="""http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE_: Optional[int] =Image.open(requests.get(_a , stream=_a ).raw ) return im def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: Dict =CONFIG_MAP[model_name]["""image_size"""] SCREAMING_SNAKE_CASE_: 
Union[str, Any] =EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=_a , ) return preprocessor def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: List[str] =[v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] SCREAMING_SNAKE_CASE_: int =sorted(set(_a ) ) SCREAMING_SNAKE_CASE_: Dict =len(_a ) SCREAMING_SNAKE_CASE_: Union[str, Any] ={b: str(_a ) for b, i in zip(_a , range(_a ) )} SCREAMING_SNAKE_CASE_: Tuple =[] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: SCREAMING_SNAKE_CASE_: int =block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", 
"""encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) SCREAMING_SNAKE_CASE_: Tuple ={} for item in rename_keys: if item[0] in original_param_names: SCREAMING_SNAKE_CASE_: Any ="""efficientnet.""" + item[1] SCREAMING_SNAKE_CASE_: List[str] ="""classifier.weight""" SCREAMING_SNAKE_CASE_: List[str] ="""classifier.bias""" return key_mapping def __magic_name__ ( lowercase , lowercase , lowercase ): for key, value in tf_params.items(): if "normalization" in key: continue SCREAMING_SNAKE_CASE_: Optional[Any] =key_mapping[key] if "_conv" in key and "kernel" in key: SCREAMING_SNAKE_CASE_: Tuple =torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: SCREAMING_SNAKE_CASE_: List[Any] =torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: SCREAMING_SNAKE_CASE_: str =torch.from_numpy(np.transpose(_a ) ) else: SCREAMING_SNAKE_CASE_: List[str] =torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Tuple =model_classes[model_name]( include_top=_a , weights="""imagenet""" , input_tensor=_a , input_shape=_a , pooling=_a , classes=1000 , classifier_activation="""softmax""" , ) SCREAMING_SNAKE_CASE_: int =original_model.trainable_variables SCREAMING_SNAKE_CASE_: Optional[int] =original_model.non_trainable_variables SCREAMING_SNAKE_CASE_: List[str] ={param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: SCREAMING_SNAKE_CASE_: str =param.numpy() SCREAMING_SNAKE_CASE_: Tuple =list(tf_params.keys() ) # Load HuggingFace model SCREAMING_SNAKE_CASE_: Optional[Any] =get_efficientnet_config(_a ) SCREAMING_SNAKE_CASE_: List[str] =EfficientNetForImageClassification(_a ).eval() SCREAMING_SNAKE_CASE_: Tuple =hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) SCREAMING_SNAKE_CASE_: Optional[Any] =rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image SCREAMING_SNAKE_CASE_: int =convert_image_processor(_a ) SCREAMING_SNAKE_CASE_: Tuple =preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_: Dict =hf_model(**_a ) SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits.detach().numpy() # Original model inference SCREAMING_SNAKE_CASE_: Optional[Any] =False SCREAMING_SNAKE_CASE_: Tuple =CONFIG_MAP[model_name]["""image_size"""] SCREAMING_SNAKE_CASE_: Dict =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) SCREAMING_SNAKE_CASE_: Union[str, Any] =image.img_to_array(_a ) SCREAMING_SNAKE_CASE_: Dict =np.expand_dims(_a , axis=0 ) SCREAMING_SNAKE_CASE_: Optional[Any] =original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1e-3 ), "The predicted logits are not the same." 
print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) SCREAMING_SNAKE_CASE_: List[Any] =f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") _UpperCAmelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version A =logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') A ={ 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization A ={ 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } A =sorted(arg_to_scheduler.keys()) A ='{' + ', '.join(arg_to_scheduler_choices) + '}' class _a ( pl.LightningModule ): def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase ) UpperCAmelCase = 0 UpperCAmelCase = Path(self.hparams.output_dir ) UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , ) else: UpperCAmelCase = config UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase , lowercase ): assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute" setattr(self.config , lowercase , getattr(self.hparams , lowercase ) ) if tokenizer is None: UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , ) else: UpperCAmelCase = tokenizer UpperCAmelCase = MODEL_MODES[mode] if model is None: UpperCAmelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , ) else: UpperCAmelCase = model def A ( self : List[Any] , *lowercase : List[str] , **lowercase : 
List[str] ): '''simple docstring''' UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model UpperCAmelCase = ['''bias''', '''LayerNorm.weight'''] UpperCAmelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase = Adafactor( lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase ) else: UpperCAmelCase = AdamW( lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase = optimizer UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A ( self : List[Any] , lowercase : int , lowercase : List[str] ): '''simple docstring''' return self.validation_step(lowercase , lowercase ) def A ( self : List[Any] , lowercase : Tuple ): '''simple docstring''' return self.validation_end(lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A ( self : List[str] , lowercase : Any ): '''simple docstring''' if stage == "test": UpperCAmelCase = len(self.test_dataloader().dataset ) else: UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase ) UpperCAmelCase = len(self.train_dataloader().dataset ) def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A ( self : Union[str, Any] ): '''simple docstring''' return self.train_loader def A ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : List[Any] ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : Any , lowercase : Union[str, Any] ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A ( self : List[str] , lowercase : Dict[str, Any] ): '''simple docstring''' UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' ) UpperCAmelCase = self.step_count self.model.save_pretrained(lowercase ) self.tokenizer.save_pretrained(lowercase ) @staticmethod def A ( lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , 
) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class _a ( pl.Callback ): def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase ) class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase ) def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log and save results to file UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase , '''w''' ) as writer: for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def snake_case_ (_a : int , _a : Optional[Any] ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_a , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ): pl.seed_everything(args.seed ) # init model UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_a ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_a ) if logging_callback is None: UpperCAmelCase = LoggingCallback() UpperCAmelCase = {} if args.fpaa: UpperCAmelCase = 1_6 if args.gpus > 1: UpperCAmelCase = '''auto''' UpperCAmelCase = '''ddp''' UpperCAmelCase = args.accumulate_grad_batches UpperCAmelCase = None UpperCAmelCase = '''auto''' UpperCAmelCase = pl.Trainer.from_argparse_args( _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , ) if args.do_train: trainer.fit(_a ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
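# --- Hedged usage sketch -----------------------------------------------------
# How the pieces above are meant to be wired together. The helper names follow
# the upstream transformers example this file corresponds to (add_generic_args,
# generic_train, BaseTransformer); MyTaskModule is a hypothetical subclass that
# implements get_dataloader():
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     BaseTransformer.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args)
#     trainer = generic_train(model, args)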
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase__ = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', }, 'tokenizer_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json', }, } lowerCamelCase__ = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } lowerCamelCase__ = '▁' # Segments (not really needed) lowerCamelCase__ = 0 lowerCamelCase__ = 1 lowerCamelCase__ = 2 lowerCamelCase__ = 3 lowerCamelCase__ = 4 class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : List[Any] = "left" lowerCAmelCase : Tuple = XLNetTokenizer def __init__( self : Tuple , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=False , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Optional[int]="<unk>" , lowerCamelCase__ : Dict="<sep>" , lowerCamelCase__ : Dict="<pad>" , lowerCamelCase__ : Any="<cls>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : int=["<eop>", "<eod>"] , **lowerCamelCase__ : Any , ) ->Dict: '''simple docstring''' _UpperCAmelCase : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase : Optional[Any] = 3 _UpperCAmelCase : List[Any] = do_lower_case _UpperCAmelCase : Optional[Any] = remove_space _UpperCAmelCase : Tuple = keep_accents _UpperCAmelCase : str = vocab_file _UpperCAmelCase : List[str] = False if not self.vocab_file else True def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict = None ) ->List[int]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = [self.sep_token_id] _UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any = None ) ->List[int]: '''simple docstring''' _UpperCAmelCase : str = [self.sep_token_id] _UpperCAmelCase : List[str] = 
[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : int = None ) ->Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCAmelCase : Union[str, Any] = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
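# --- Hedged usage note -------------------------------------------------------
# Unlike BERT, XLNet appends its special tokens at the END of the sequence
# (sep, then cls), which is what build_inputs_with_special_tokens above encodes.
# A quick check against the upstream class this file corresponds to
# (transformers.XLNetTokenizerFast); downloading the vocab is required:
from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
ids = tok.build_inputs_with_special_tokens([10, 11])
assert ids == [10, 11, tok.sep_token_id, tok.cls_token_id]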
'''simple docstring''' import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 lowerCamelCase__ = { 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 128, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 50, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 10, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 10, 'exponential_decay_length_penalty': (5, 1.01), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): @classmethod def lowerCAmelCase__ ( cls : List[str] ) ->str: '''simple docstring''' _UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(lowerCamelCase__ ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int: '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config" ) except HTTPError: pass def lowerCAmelCase__ ( self : int ) ->Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("test-config" , use_auth_token=self._token ) _UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token ) _UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ 
) ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : str = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token ) _UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token ) _UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) def lowerCAmelCase__ ( self : List[str] ) ->Any: '''simple docstring''' CustomConfig.register_for_auto_class() _UpperCAmelCase : int = CustomConfig(attribute=42 ) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} ) _UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig" ) self.assertEqual(new_config.attribute , 42 ) class lowerCAmelCase__ ( unittest.TestCase ): def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : Tuple = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated _UpperCAmelCase : Any = c.n_embd + 1 # int _UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float _UpperCAmelCase : Tuple = not c.scale_attn_weights # bool _UpperCAmelCase : List[Any] = c.summary_type + "foo" # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" ) self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" ) self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" ) self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Any = PretrainedConfig() _UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) _UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )] if len(lowerCamelCase__ ) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" F""" {', '.join(lowerCamelCase__ )}.""" ) def lowerCAmelCase__ ( self : Optional[int] ) ->int: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ) _UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" ) self.assertIsNotNone(lowerCamelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = mock.Mock() _UpperCAmelCase : List[str] = 5_00 _UpperCAmelCase : Dict = {} _UpperCAmelCase : Tuple = HTTPError _UpperCAmelCase : Any = {} # Download this model to make sure it's in the cache. _UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head: _UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase__ ( self : Optional[int] ) ->Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" ) _UpperCAmelCase : str = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCamelCase__ ) _UpperCAmelCase : Dict = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 _UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 _UpperCAmelCase : Dict = ["config.42.0.0.json"] _UpperCAmelCase : Union[str, Any] = 7_68 configuration.save_pretrained(lowerCamelCase__ ) shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) ) _UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 7_68 ) def lowerCAmelCase__ ( self : List[str] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs" import transformers as new_transformers _UpperCAmelCase : Any = "v4.0.0" _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCamelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers _UpperCAmelCase : List[Any] = "v3.0.0" _UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(old_configuration.hidden_size , 7_68 )
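# --- Hedged sketch (simplified from the resolution logic the two tests above
# rely on): a repo may ship several config.<min_version>.json files, and
# loading picks the newest one whose version gate is <= the installed
# transformers version, falling back to plain config.json.
from packaging import version


def pick_config_file(candidates: list, installed: str = "4.31.0") -> str:
    chosen, chosen_version = "config.json", version.parse("0")
    for name in candidates:
        parts = name.split(".")
        if len(parts) <= 2:  # plain "config.json" has no version gate
            continue
        gate = version.parse(".".join(parts[1:-1]))
        if chosen_version < gate <= version.parse(installed):
            chosen, chosen_version = name, gate
    return chosen


assert pick_config_file(["config.json", "config.4.0.0.json", "config.42.0.0.json"]) == "config.4.0.0.json"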
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __A: def __init__( self , _snake_case , _snake_case=12 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=0.02 , _snake_case=0 , _snake_case=None , ) -> Dict: '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = use_labels __a = vocab_size __a = hidden_size __a = projection_dim __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = dropout __a = attention_dropout __a = max_position_embeddings __a = initializer_range __a = scope __a = bos_token_id def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: __a = input_mask.numpy() __a , __a = input_mask.shape __a = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_snake_case ): __a = 1 __a = 0 __a = self.get_config() return config, input_ids, tf.convert_to_tensor(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = TFBlipTextModel(config=_snake_case ) __a = model(_snake_case , attention_mask=_snake_case , training=_snake_case ) __a = model(_snake_case , training=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = self.prepare_config_and_inputs() __a , __a , __a = config_and_inputs __a = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __A( a , unittest.TestCase ): snake_case_ = (TFBlipTextModel,) if is_tf_available() else () snake_case_ = False snake_case_ = False snake_case_ = False def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' __a = BlipTextModelTester(self ) __a = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def 
SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = TFBlipTextModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case=True ) -> Optional[Any]: '''simple docstring''' super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
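# --- Hedged smoke-test sketch mirroring what BlipTextModelTester checks above:
# the text model returns (batch, seq_len, hidden) hidden states plus a
# (batch, hidden) pooled output. The tiny config values are arbitrary
# assumptions, not taken from a released checkpoint.
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

cfg = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = TFBlipTextModel(config=cfg)
out = model(tf.ones((2, 7), dtype=tf.int32), training=False)
print(out.last_hidden_state.shape)  # expect (2, 7, 32)
print(out.pooler_output.shape)      # expect (2, 32)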
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
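# --- Hedged sketch of how a pin table like `deps` is consumed in practice
# (mirrors the deps_list helper pattern used in library setup.py files; the
# particular package selection below is illustrative, not the actual install
# set of any release):
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]


install_requires = deps_list(
    "importlib_metadata", "filelock", "huggingface-hub", "numpy", "regex", "requests", "Pillow"
)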
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : int = logging.get_logger(__name__) _lowerCamelCase : str = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCAmelCase = 'yolos' def __init__(self : List[str] , _lowerCAmelCase : Optional[int]=768 , _lowerCAmelCase : Optional[int]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : List[str]=3072 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[int]=1e-12 , _lowerCAmelCase : List[Any]=[512, 864] , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=100 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.1 , **_lowerCAmelCase : Dict , ): super().__init__(**a_ ) A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = num_detection_tokens A = use_mid_position_embeddings A = auxiliary_loss # Hungarian matcher A = class_cost A = bbox_cost A = giou_cost # Loss coefficients A = bbox_loss_coefficient A = giou_loss_coefficient A = eos_coefficient class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __lowerCAmelCase = version.parse('''1.11''' ) @property def A (self : str ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def A (self : Tuple ): return 1e-4 @property def A (self : Optional[int] ): return 12
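# --- Hedged usage sketch against the upstream classes this file corresponds to
# (transformers' YolosConfig and its companion ONNX config). The internal
# import path below is an assumption based on where transformers defines them:
from transformers import YolosConfig
from transformers.models.yolos.configuration_yolos import YolosOnnxConfig

config = YolosConfig()
onnx_config = YolosOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict with a single "pixel_values" entry
print(onnx_config.atol_for_validation)  # 1e-4, as declared above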
def actual_power(a: int, b: int) -> int:
    """Computes a**b for a non-negative exponent by halving the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handles negative exponents by inverting the result of actual_power."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
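# Quick sanity checks for the divide-and-conquer power above. Note a design
# trade-off: the two recursive calls per level make this O(b) multiplications
# as written; caching actual_power(a, int(b / 2)) once per call would bring it
# down to O(log b).
assert power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(5, 0) == 1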
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase_ = 1_28 elif "12-12" in model_name: UpperCAmelCase_ = 12 UpperCAmelCase_ = 12 elif "14-14" in model_name: UpperCAmelCase_ = 14 UpperCAmelCase_ = 14 elif "16-16" in model_name: UpperCAmelCase_ = 16 UpperCAmelCase_ = 16 else: raise ValueError("Model not supported" ) UpperCAmelCase_ = "huggingface/label-files" if "speech-commands" in model_name: UpperCAmelCase_ = 35 UpperCAmelCase_ = "speech-commands-v2-id2label.json" else: UpperCAmelCase_ = 5_27 UpperCAmelCase_ = "audioset-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Any: '''simple docstring''' if "module.v" in name: UpperCAmelCase_ = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: UpperCAmelCase_ = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: UpperCAmelCase_ = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: UpperCAmelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: UpperCAmelCase_ = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase_ = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCAmelCase_ = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase_ = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase_ = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase_ = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase_ = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: UpperCAmelCase_ = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ = orig_state_dict.pop(snake_case_ ) if "qkv" in key: UpperCAmelCase_ = key.split("." 
) UpperCAmelCase_ = int(key_split[3] ) UpperCAmelCase_ = config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[dim : dim * 2, :] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] else: UpperCAmelCase_ = val return orig_state_dict def lowerCAmelCase_ ( snake_case_ : Any ) -> str: '''simple docstring''' UpperCAmelCase_ = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = get_audio_spectrogram_transformer_config(snake_case_ ) UpperCAmelCase_ = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict UpperCAmelCase_ = model_name_to_url[model_name] UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" ) # remove some keys remove_keys(snake_case_ ) # rename some keys UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ ) # load 🤗 model UpperCAmelCase_ = ASTForAudioClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase_ = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978 UpperCAmelCase_ = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526 UpperCAmelCase_ = 10_24 if "speech-commands" not in model_name else 1_28 UpperCAmelCase_ = ASTFeatureExtractor(mean=snake_case_ , std=snake_case_ , max_length=snake_case_ ) if "speech-commands" in model_name: UpperCAmelCase_ = load_dataset("speech_commands" , "v0.02" , split="validation" ) UpperCAmelCase_ = dataset[0]["audio"]["array"] else: UpperCAmelCase_ = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) UpperCAmelCase_ , UpperCAmelCase_ = torchaudio.load(snake_case_ ) UpperCAmelCase_ = waveform.squeeze().numpy() UpperCAmelCase_ = feature_extractor(snake_case_ , sampling_rate=1_60_00 , return_tensors="pt" ) # forward pass UpperCAmelCase_ = model(**snake_case_ ) UpperCAmelCase_ = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase_ = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase_ = torch.tensor([-1.1986, -7.0903, -8.2718] ) 
elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase_ = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase_ = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase_ = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase_ = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase_ = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase_ = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(snake_case_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(f"""MIT/{model_name}""" ) feature_extractor.push_to_hub(f"""MIT/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Tuple =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) SCREAMING_SNAKE_CASE_: List[Any] =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
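# --- Hedged construction sketch: _rope_scaling_validation above accepts only a
# two-field dict {"type": "linear" | "dynamic", "factor": float > 1}. The
# module uses relative imports, so these lines are indicative rather than
# directly runnable here:
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError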
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds the root of `function` from `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}')
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
    )
    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
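# Design note on `multiplicity` above: at a root of multiplicity m, plain
# Newton iteration converges only linearly; scaling the step by m restores
# quadratic convergence. Hedged example for the double root x = 2 of
# f(x) = (x - 2)**2 * (x + 1):
#
#     newton_raphson("(x - 2)**2 * (x + 1)", 3.0, multiplicity=2)  # -> ~2.0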
def get_highest_set_bit_position(number: int) -> int:
    """Returns the 1-indexed position of the highest set bit (0 for input 0).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
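# Quick checks: the position is 1-indexed from the least significant bit, so
# for non-negative input this is equivalent to int.bit_length().
assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(1) == 1
assert get_highest_set_bit_position(25) == 5  # 0b11001
assert get_highest_set_bit_position(25) == (25).bit_length()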
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to a string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item, or
        # the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
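# Hedged spot checks for the helpers above (Project Euler 74 context). The
# memoization in chain_sets_lengths lets each chain stop as soon as it reaches
# any previously seen starting element, turning the search into a single pass.
assert digit_factorial_sum(145) == 145         # 1! + 4! + 5! = 145, a fixed point
assert digit_factorial_sum(169) == 363_601     # 169 starts the well-known 3-cycle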
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
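A hedged usage sketch of the builder above: `datasets.load_dataset("csv", ...)` dispatches to it, and any `CsvConfig` field can be passed as a keyword, ending up in `pd_read_csv_kwargs` and forwarded to `pandas.read_csv`. The data file path is a placeholder.

import datasets

# sep and skiprows are CsvConfig fields; they are forwarded to pandas.read_csv
dataset = datasets.load_dataset(
    "csv",
    data_files={"train": "my_data.csv"},  # hypothetical path
    sep=";",
    skiprows=1,
)
print(dataset["train"].features)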
317
1
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : Any = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : List[Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Tuple = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = scount * 
numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : str ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ) -> str: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
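The metric script above can be exercised end to end; this simply runs the example from its own _KWARGS_DESCRIPTION docstring. `load_metric` is the legacy datasets API such scripts target.

import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}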
195
"""simple docstring""" a__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = input("Enter message: " ) __SCREAMING_SNAKE_CASE = input("Enter key [alphanumeric]: " ) __SCREAMING_SNAKE_CASE = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): __SCREAMING_SNAKE_CASE = "encrypt" __SCREAMING_SNAKE_CASE = encrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) elif mode.lower().startswith("d" ): __SCREAMING_SNAKE_CASE = "decrypt" __SCREAMING_SNAKE_CASE = decrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) print(f"""\n{mode.title()}ed message:""" ) print(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "encrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "decrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = key.upper() for symbol in message: __SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowerCAmelCase_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = 0 else: translated.append(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) if __name__ == "__main__": main()
195
1
'''simple docstring'''


def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
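Two quick checks of the function above: comparison is case-insensitive, and any repeated letter fails the test.

assert is_isogram("Uncopyrightable") is True  # all 15 letters distinct
assert is_isogram("allowed") is False         # 'l' repeats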
323
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            # Subscript is 0 for an only child, else the 1-based position among same-tag siblings.
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
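A short usage sketch of the extractor above (shipped in transformers as MarkupLMFeatureExtractor): it turns raw HTML into the text nodes and their XPaths.

extractor = MarkupLMFeatureExtractor()
encoding = extractor("<html><body><h1>Title</h1><p>Hello world</p></body></html>")
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]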
38
0
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Optional[Any]: """simple docstring""" return EnvironmentCommand() def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> str: """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class lowerCAmelCase_ ( lowerCamelCase__ ): '''simple docstring''' @staticmethod def UpperCamelCase__ ( _UpperCAmelCase ): snake_case_ = parser.add_parser('''env''' ) download_parser.set_defaults(func=_UpperCAmelCase ) download_parser.add_argument( '''--accelerate-config_file''' , default=_UpperCAmelCase , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , *_UpperCAmelCase ): snake_case_ = accelerate_config_file def UpperCamelCase__ ( self ): snake_case_ = '''not installed''' if is_safetensors_available(): import safetensors snake_case_ = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors snake_case_ = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' snake_case_ = '''not installed''' snake_case_ = snake_case_ = '''not found''' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file snake_case_ = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(_UpperCAmelCase ): snake_case_ = load_config_from_file(self._accelerate_config_file ).to_dict() snake_case_ = ( '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else F'''\t{accelerate_config}''' ) snake_case_ = '''not installed''' snake_case_ = '''NA''' if is_torch_available(): import torch snake_case_ = torch.__version__ snake_case_ = torch.cuda.is_available() snake_case_ = '''not installed''' snake_case_ = '''NA''' if is_tf_available(): import tensorflow as tf snake_case_ = tf.__version__ try: # deprecated in v2.1 snake_case_ = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool snake_case_ = bool(tf.config.list_physical_devices('''GPU''' ) ) snake_case_ = '''not installed''' snake_case_ = '''not installed''' snake_case_ = '''not installed''' snake_case_ = '''NA''' if is_flax_available(): import flax import jax import jaxlib snake_case_ = flax.__version__ snake_case_ = jax.__version__ snake_case_ = jaxlib.__version__ snake_case_ = jax.lib.xla_bridge.get_backend().platform snake_case_ = { '''`transformers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Huggingface_hub version''': huggingface_hub.__version__, '''Safetensors version''': F'''{safetensors_version}''', '''Accelerate version''': F'''{accelerate_version}''', '''Accelerate config''': F'''{accelerate_config_str}''', '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''', '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''', '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''', '''Jax version''': F'''{jax_version}''', 
'''JaxLib version''': F'''{jaxlib_version}''', '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' ) print(self.format_dict(_UpperCAmelCase ) ) return info @staticmethod def UpperCamelCase__ ( _UpperCAmelCase ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
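The command above backs `transformers-cli env`. A programmatic sketch follows; since method names in this dump are obfuscated, the `run()` entry point is an assumption based on the BaseTransformersCLICommand interface.

from transformers.commands.env import EnvironmentCommand

# None: no accelerate config file; run() prints the environment report and
# returns the info dict (assumed entry point, see note above).
info = EnvironmentCommand(None).run()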
358
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' __snake_case = PegasusTokenizer __snake_case = PegasusTokenizerFast __snake_case = True __snake_case = True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing snake_case_ = PegasusTokenizer(_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase__ ( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def UpperCamelCase__ ( self , **_UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def UpperCamelCase__ ( self , _UpperCAmelCase ): return ("This is a test", "This is a test") def UpperCamelCase__ ( self ): snake_case_ = '''</s>''' snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def UpperCamelCase__ ( self ): snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(_UpperCAmelCase ) , 11_03 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 11_03 ) def UpperCamelCase__ ( self ): snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case_ = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0] snake_case_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase__ ( self ): snake_case_ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word snake_case_ = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' snake_case_ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] snake_case_ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase__ ( self ): snake_case_ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_61_03 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_03 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 10_24 snake_case_ = '''To ensure a smooth flow of bank resolutions.''' snake_case_ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1] snake_case_ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0] 
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def UpperCamelCase__ ( self ): snake_case_ = ['''This is going to be way too long.''' * 1_50, '''short example'''] snake_case_ = ['''not super long but more than 5 tokens''', '''tiny'''] snake_case_ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' ) snake_case_ = self._large_tokenizer( text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 10_24) assert batch.attention_mask.shape == (2, 10_24) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def UpperCamelCase__ ( self ): # fmt: off snake_case_ = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' __snake_case = PegasusTokenizer __snake_case = PegasusTokenizerFast __snake_case = True __snake_case = True def UpperCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing snake_case_ = 
PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase__ ( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def UpperCamelCase__ ( self , **_UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def UpperCamelCase__ ( self , _UpperCAmelCase ): return ("This is a test", "This is a test") def UpperCamelCase__ ( self ): snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) snake_case_ = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0] snake_case_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @require_torch def UpperCamelCase__ ( self ): snake_case_ = ['''This is going to be way too long.''' * 10_00, '''short example'''] snake_case_ = ['''not super long but more than 5 tokens''', '''tiny'''] snake_case_ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' ) snake_case_ = self._large_tokenizer( text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 40_96) assert batch.attention_mask.shape == (2, 40_96) assert targets["input_ids"].shape == (2, 5) assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask. def UpperCamelCase__ ( self ): snake_case_ = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) snake_case_ = self._large_tokenizer(_UpperCAmelCase ).input_ids self.assertListEqual( _UpperCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
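The integration assertions above can be reproduced directly: Pegasus terminates sequences with </s> (id 1) and tokenizes plain text with the large checkpoint.

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = tokenizer("To ensure a smooth flow of bank resolutions.").input_ids
assert ids[-1] == tokenizer.eos_token_id == 1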
267
0
'''simple docstring''' import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( _lowercase , unittest.TestCase ): lowerCamelCase : Optional[int] = TransfoXLTokenizer lowerCamelCase : Any = False lowerCamelCase : Union[str, Any] = False def UpperCAmelCase__ (self ): super().setUp() lowerCamelCase_ : str = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] lowerCamelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def UpperCAmelCase__ (self , **A ): lowerCamelCase_ : Optional[int] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A ) def UpperCAmelCase__ (self , A ): lowerCamelCase_ : int = '''<unk> UNwanted , running''' lowerCamelCase_ : str = '''<unk> unwanted, running''' return input_text, output_text def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[int] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A ) lowerCamelCase_ : Dict = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[Any] = TransfoXLTokenizer(lower_case=A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = TransfoXLTokenizer(lower_case=A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[int] = TransfoXLTokenizer(lower_case=A ) lowerCamelCase_ : Dict = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' lowerCamelCase_ : List[Any] = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(A ) , A ) self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A ) def UpperCAmelCase__ (self ): lowerCamelCase_ : str = self.get_tokenizer() lowerCamelCase_ : List[str] = len(A ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(A ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
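A hedged sketch of the `move_added_token` behaviour exercised in the final test above: a token appended via `add_tokens` can be relocated to a chosen id. The pretrained checkpoint here stands in for the small vocab fixture the test builds.

from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
tokenizer.add_tokens(["new1"])
tokenizer.move_added_token("new1", 1)
# As the test asserts, the moved token now encodes to its new id:
# tokenizer.encode("new1") == [1]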
318
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowercase : int = logging.get_logger(__name__) def lowercase_ ( _lowercase ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ : Optional[int] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowerCamelCase_ : Optional[Any] = [144, 192, 240] lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowerCamelCase_ : List[str] = [96, 120, 144] lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowerCamelCase_ : Any = [64, 80, 96] lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320] lowerCamelCase_ : Union[str, Any] = 0.05 lowerCamelCase_ : Union[str, Any] = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): lowerCamelCase_ : Optional[Any] = 512 lowerCamelCase_ : Dict = 16 lowerCamelCase_ : Dict = 21 lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json''' else: lowerCamelCase_ : Any = 1_000 lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json''' lowerCamelCase_ : Optional[Any] = '''huggingface/label-files''' lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()} lowerCamelCase_ : List[str] = idalabel lowerCamelCase_ : str = {v: k for k, v in idalabel.items()} return config def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." 
in name: lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' ) if F""".global_rep.{i}.bias""" in name: lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' ) if ".global_rep." in name: lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." in name: lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." in name: lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." 
not in name): lowerCamelCase_ : Tuple = '''mobilevit.''' + name return name def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple: '''simple docstring''' if base_model: lowerCamelCase_ : List[str] = '''''' else: lowerCamelCase_ : Any = '''mobilevit.''' for key in orig_state_dict.copy().keys(): lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase ) if key[:8] == "encoder.": lowerCamelCase_ : int = key[8:] if "qkv" in key: lowerCamelCase_ : List[Any] = key.split('''.''' ) lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1 lowerCamelCase_ : Union[str, Any] = int(key_split[3] ) lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowerCamelCase_ : Optional[Any] = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: lowerCamelCase_ : List[str] = val[:dim, :] lowerCamelCase_ : Dict = val[dim : dim * 2, :] lowerCamelCase_ : Union[str, Any] = val[-dim:, :] else: lowerCamelCase_ : List[Any] = val[:dim] lowerCamelCase_ : Optional[int] = val[dim : dim * 2] lowerCamelCase_ : int = val[-dim:] else: lowerCamelCase_ : int = val return orig_state_dict def lowercase_ ( ) -> str: '''simple docstring''' lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im @torch.no_grad() def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple: '''simple docstring''' lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase ) # load original state_dict lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval() else: lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval() lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCamelCase_ : Optional[int] = model(**_lowercase ) lowerCamelCase_ : List[str] = outputs.logits if mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowerCamelCase_ : Union[str, Any] = torch.tensor( [ [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]], [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]], [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowerCamelCase_ : Dict = torch.tensor( [ [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]], [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]], [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowerCamelCase_ : List[str] = torch.tensor( [ [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], 
[7.56_33, 7.80_50, 7.89_01]], [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]], [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 ) else: assert logits.shape == (1, 1_000) if mobilevit_name == "mobilevit_s": lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] ) elif mobilevit_name == "mobilevit_xs": lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] ) elif mobilevit_name == "mobilevit_xxs": lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowercase ) if push_to_hub: lowerCamelCase_ : str = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) lowerCamelCase_ : int = model_mapping[mobilevit_name] image_processor.push_to_hub(_lowercase , organization='''apple''' ) model.push_to_hub(_lowercase , organization='''apple''' ) if __name__ == "__main__": __lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __lowercase : Optional[int] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
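The converter above can also be driven directly, mirroring the argparse wiring at the bottom of the script; both paths in this sketch are hypothetical placeholders.

convert_movilevit_checkpoint(
    "mobilevit_s",          # mobilevit_name
    "mobilevit_s.pt",       # checkpoint_path (hypothetical local file)
    "mobilevit-small-hf",   # pytorch_dump_folder_path (hypothetical)
    False,                  # push_to_hub
)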
318
1
deps = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
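A hedged sketch of how a pin table like this is typically consumed when assembling setup extras; the helper here is modeled on the deps_list pattern in transformers' setup.py and is an assumption, not part of the table above.

# Excerpt of the table above, kept self-contained for the sketch.
deps = {"torch": "torch>=1.9,!=1.12.0", "tqdm": "tqdm>=4.27"}

def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]

print(deps_list("torch", "tqdm"))  # ['torch>=1.9,!=1.12.0', 'tqdm>=4.27']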
367
'''simple docstring'''

import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Reassemble the txt2img components (plus a CLIP image encoder) into an
    # image-variation pipeline.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
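A hedged sketch of using the pipeline the script above writes to --dump_path: load it back and generate a variation of an input image. Both paths are placeholders.

from diffusers import UnCLIPImageVariationPipeline
from diffusers.utils import load_image

pipe = UnCLIPImageVariationPipeline.from_pretrained("path/to/dump_path")
image = load_image("input.png")  # hypothetical local image
variation = pipe(image=image).images[0]
variation.save("variation.png")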
129
0
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py lowercase_ = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. lowercase_ = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. lowercase_ = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") lowercase_ = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. lowercase_ = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) lowercase_ = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 'zero-shot-image-classification', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 
'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def lowercase ( lowerCAmelCase__ : Any ) -> Optional[int]: __a = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase__ ) return [m.group(0 ) for m in matches] def lowercase ( ) -> Optional[int]: __a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __a = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __a = collections.defaultdict(lowerCAmelCase__ ) __a = collections.defaultdict(lowerCAmelCase__ ) __a = collections.defaultdict(lowerCAmelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(lowerCAmelCase__ ): __a = None if _re_tf_models.match(lowerCAmelCase__ ) is not None: __a = tf_models __a = _re_tf_models.match(lowerCAmelCase__ ).groups()[0] elif _re_flax_models.match(lowerCAmelCase__ ) is not None: __a = flax_models __a = _re_flax_models.match(lowerCAmelCase__ ).groups()[0] elif _re_pt_models.match(lowerCAmelCase__ ) is not None: __a = pt_models __a = _re_pt_models.match(lowerCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(lowerCAmelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __a = True break # Try again after removing the last word in the name __a = ''.join(camel_case_split(lowerCAmelCase__ )[:-1] ) __a = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __a = list(lowerCAmelCase__ ) all_models.sort() __a = {'model_type': all_models} __a = [pt_models[t] for t in all_models] __a = [tf_models[t] for t in all_models] __a = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __a = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __a = 'AutoProcessor' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __a = 'AutoTokenizer' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __a = 'AutoFeatureExtractor' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__a = 'AutoTokenizer' __a = [processors[t] for t in all_models] return pd.DataFrame(lowerCAmelCase__ ) def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> List[str]: __a = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __a = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}'''] __a = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): continue # First extract all model_names __a = [] for name in getattr(lowerCAmelCase__ , lowerCAmelCase__ ).values(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): model_names.append(lowerCAmelCase__ ) else: model_names.extend(list(lowerCAmelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ) -> List[Any]: __a = get_frameworks_table() __a = Dataset.from_pandas(lowerCAmelCase__ ) __a = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=lowerCAmelCase__ ) __a = Dataset.from_json(lowerCAmelCase__ ) __a = { tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class']) for i in range(len(lowerCAmelCase__ ) ) } __a = update_pipeline_and_auto_class_table(lowerCAmelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. __a = sorted(table.keys() ) __a = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __a = Dataset.from_pandas(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(lowerCAmelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(lowerCAmelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __a = ( f'''Update with commit {commit_sha}\n\nSee: ''' f'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: __a = 'Update' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=lowerCAmelCase__ , repo_type='''dataset''' , token=lowerCAmelCase__ , commit_message=lowerCAmelCase__ , ) def lowercase ( ) -> str: __a = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __a = transformers_module.pipelines.SUPPORTED_TASKS __a = [] for key in pipeline_tasks: if key not in in_table: __a = pipeline_tasks[key]['pt'] if isinstance(lowerCAmelCase__ , (list, tuple) ): __a = model[0] __a = model.__name__ if model not in in_table.values(): missing.append(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: __a = ', '.join(lowerCAmelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f'''`utils/update_metadata.py`: {msg}. 
Please add them!''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument(
        "--check-only", action="store_true", help="Activate to just check all pipelines are present."
    )
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
45
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} lowerCAmelCase : Dict = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } lowerCAmelCase : Dict = { 'allenai/longformer-base-4096': 40_96, 'allenai/longformer-large-4096': 40_96, 'allenai/longformer-large-4096-finetuned-triviaqa': 40_96, 'allenai/longformer-base-4096-extra.pos.embd.only': 40_96, 'allenai/longformer-large-4096-extra.pos.embd.only': 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def A_ ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) SCREAMING_SNAKE_CASE_ : List[str] = bs[:] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(a ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE_ : int = [chr(a ) for n in cs] return dict(zip(a , a ) ) def A_ ( a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = set() SCREAMING_SNAKE_CASE_ : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE_ : Any = char return pairs class _A ( __magic_name__): SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token SCREAMING_SNAKE_CASE_ : int = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token SCREAMING_SNAKE_CASE_ : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE_ : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token super().__init__( errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle: SCREAMING_SNAKE_CASE_ : List[str] = json.load(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Optional[int] = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ : int = errors # how to handle errors in decoding SCREAMING_SNAKE_CASE_ : List[Any] = bytes_to_unicode() SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle: SCREAMING_SNAKE_CASE_ : Optional[int] = merges_handle.read().split('\n' )[1:-1] SCREAMING_SNAKE_CASE_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE_ : Dict = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) SCREAMING_SNAKE_CASE_ : Dict = {} SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE_ : Tuple = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def UpperCAmelCase ( self ): """simple docstring""" return len(self.encoder ) def UpperCAmelCase ( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Dict = get_pairs(_SCREAMING_SNAKE_CASE ) if not pairs: return token while True: SCREAMING_SNAKE_CASE_ : int = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = bigram SCREAMING_SNAKE_CASE_ : List[Any] = [] SCREAMING_SNAKE_CASE_ : List[Any] = 0 while i < len(_SCREAMING_SNAKE_CASE ): try: SCREAMING_SNAKE_CASE_ : Any = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE_ : Tuple = j if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE_ : str = tuple(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Tuple = new_word if len(_SCREAMING_SNAKE_CASE ) == 1: break else: SCREAMING_SNAKE_CASE_ : Any = get_pairs(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : List[Any] = ' '.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : Tuple = word return word def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = [] for token in re.findall(self.pat , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes 
to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) return bpe_tokens def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return self.decoder.get(_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''.join(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE_ : Tuple = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0 with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!' 
) SCREAMING_SNAKE_CASE_ : List[Any] = token_index writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' ) index += 1 return vocab_file, merge_file def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id] SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE_ : List[Any] = ' ' + text return (text, kwargs)
253
0
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def _a ( SCREAMING_SNAKE_CASE_ : List[str] ): __lowerCAmelCase = [] for line in lines: __lowerCAmelCase = re.sub(R"#.*" , "" , _a ) # remove comments if line: filtered_lines.append(_a ) __lowerCAmelCase = """\n""".join(_a ) # Make a hash from all this code __lowerCAmelCase = full_str.encode("utf-8" ) return shaaaa(_a ).hexdigest() # get importable module names and hash for caching UpperCamelCase__ = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCamelCase__ = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCamelCase__ = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name UpperCamelCase__ = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
365
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
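# Because the entry point is wrapped with fire.Fire, the function's keyword
# arguments double as command-line flags. A usage sketch (the script filename
# below is hypothetical):
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")
# Or, from a shell, assuming the file is saved as download_wmt.py:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16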
102
0
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1

        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor

        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
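# For intuition on the Metropolis-style acceptance rule above: for a fixed
# downhill move, the acceptance probability e**(change / current_temp) shrinks
# as the temperature decays. A tiny numeric check:
import math

change = -2.0  # a worse neighbor
print(math.e ** (change / 100))  # ~0.980 at T=100: almost always accepted
print(math.e ** (change / 1))    # ~0.135 at T=1: rarely accepted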
279
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
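# Two quick sanity checks of digit_factorial_sum, using the chain that
# Project Euler 74 gives for 69:
print(digit_factorial_sum(145))  # 145, since 1! + 4! + 5! = 1 + 24 + 120
# 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 (repeats): five distinct terms
print(digit_factorial_sum(69))   # 363600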
279
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model @property def SCREAMING_SNAKE_CASE_( self ) -> List[str]: torch.manual_seed(0 ) lowerCamelCase_ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , ) return model @property def SCREAMING_SNAKE_CASE_( self ) -> List[str]: torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.dummy_uncond_unet lowerCamelCase_ = DDIMScheduler() lowerCamelCase_ = self.dummy_vq_model lowerCamelCase_ = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase ) ldm.to(lowercase ) ldm.set_progress_bar_config(disable=lowercase ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = ldm(generator=lowercase , num_inference_steps=2 , output_type="numpy" ).images lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = ldm(generator=lowercase , num_inference_steps=2 , output_type="numpy" , return_dict=lowercase )[0] lowerCamelCase_ = image[0, -3:, -3:, -1] lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase_ = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] ) lowerCamelCase_ = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" ) ldm.to(lowercase ) ldm.set_progress_bar_config(disable=lowercase ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = ldm(generator=lowercase , num_inference_steps=5 , output_type="numpy" ).images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase_ = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] ) lowerCamelCase_ = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
47
import copy import re class _SCREAMING_SNAKE_CASE : lowerCAmelCase__ = 'hp' lowerCAmelCase__ = {} lowerCAmelCase__ = None @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase , lowercase ) -> Tuple: lowerCamelCase_ = prefix lowerCamelCase_ = defaults cls.build_naming_info() @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: if len(lowercase ) == 0: return "" lowerCamelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowercase ) + 1 ): lowerCamelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowerCamelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowercase ): lowerCamelCase_ = "" while integer != 0: lowerCamelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s lowerCamelCase_ = 0 while True: lowerCamelCase_ = word + "#" + int_to_alphabetic(lowercase ) if sword in info["reverse_short_word"]: continue else: lowerCamelCase_ = sword break lowerCamelCase_ = short_word lowerCamelCase_ = word return short_word @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> int: lowerCamelCase_ = param_name.split("_" ) lowerCamelCase_ = [TrialShortNamer.shortname_for_word(lowercase , lowercase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowerCamelCase_ = ["", "_"] for separator in separators: lowerCamelCase_ = separator.join(lowercase ) if shortname not in info["reverse_short_param"]: lowerCamelCase_ = shortname lowerCamelCase_ = param_name return shortname return param_name @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: lowerCamelCase_ = TrialShortNamer.shortname_for_key(lowercase , lowercase ) lowerCamelCase_ = short_name lowerCamelCase_ = param_name @classmethod def SCREAMING_SNAKE_CASE_( cls ) -> Dict: if cls.NAMING_INFO is not None: return lowerCamelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } lowerCamelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowercase , lowercase ) lowerCamelCase_ = info @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> Optional[int]: cls.build_naming_info() assert cls.PREFIX is not None lowerCamelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowerCamelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(lowercase , lowercase ): lowerCamelCase_ = 1 if v else 0 lowerCamelCase_ = "" if isinstance(lowercase , (int, float) ) else "-" lowerCamelCase_ = f'{key}{sep}{v}' name.append(lowercase ) return "_".join(lowercase ) @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> List[Any]: lowerCamelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": lowerCamelCase_ = [] else: lowerCamelCase_ = repr.split("_" ) lowerCamelCase_ = {} for value in values: if "-" in value: lowerCamelCase_ , lowerCamelCase_ = value.split("-" ) else: lowerCamelCase_ = re.sub("[0-9.]" , "" , lowercase ) lowerCamelCase_ = float(re.sub("[^0-9.]" , "" , lowercase ) ) lowerCamelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] 
            parameters[p_k] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
47
1
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
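# A quick check of the iteration above: each step applies the Newton update
# x - f(x)/f'(x) = x - (x**2 - a) / (2 * x), so the result should agree with
# math.sqrt up to the tolerance.
import math

print(square_root_iterative(4))                      # ~2.0
print(abs(square_root_iterative(5) - math.sqrt(5)))  # ~0.0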
216
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
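# A couple of sanity checks covering both the odd and even total-length branches:
print(median_of_two_arrays([1, 3], [2]))     # 2 -> the middle of [1, 2, 3]
print(median_of_two_arrays([1, 2], [3, 4]))  # 2.5 -> the mean of 2 and 3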
216
1
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'encoder.layer_norm_for_extract': 'layer_norm_for_extract', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'label_embs_concat': 'label_embeddings_concat', 'mask_emb': 'masked_spec_embed', 'spk_proj': 'speaker_proj', } lowerCamelCase__ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'label_embeddings_concat', 'speaker_proj', 'layer_norm_for_extract', ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for attribute in key.split('.' ): __lowerCAmelCase : Any = getattr(UpperCamelCase__ , UpperCamelCase__ ) if weight_type is not None: __lowerCAmelCase : Dict = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape else: __lowerCAmelCase : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowerCAmelCase : Optional[Any] = value elif weight_type == "weight_g": __lowerCAmelCase : Optional[Any] = value elif weight_type == "weight_v": __lowerCAmelCase : Tuple = value elif weight_type == "bias": __lowerCAmelCase : List[Any] = value else: __lowerCAmelCase : Optional[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[int] = [] __lowerCAmelCase : List[str] = fairseq_model.state_dict() __lowerCAmelCase : Optional[Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Optional[int] = False if "conv_layers" in name: load_conv_layer( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , ) __lowerCAmelCase : Dict = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue __lowerCAmelCase : str = True if "*" in mapped_key: __lowerCAmelCase : Dict = name.split(UpperCamelCase__ )[0].split('.' 
)[-2] __lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , UpperCamelCase__ ) if "weight_g" in name: __lowerCAmelCase : str = 'weight_g' elif "weight_v" in name: __lowerCAmelCase : int = 'weight_v' elif "bias" in name: __lowerCAmelCase : str = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : List[str] = 'weight' else: __lowerCAmelCase : Tuple = None set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) continue if not is_used: unused_weights.append(UpperCamelCase__ ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = full_name.split('conv_layers.' )[-1] __lowerCAmelCase : str = name.split('.' ) __lowerCAmelCase : Optional[Any] = int(items[0] ) __lowerCAmelCase : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." ) __lowerCAmelCase : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowerCAmelCase : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(UpperCamelCase__ ) @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True ): if config_path is not None: __lowerCAmelCase : Union[str, Any] = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ ) else: __lowerCAmelCase : Any = UniSpeechSatConfig() __lowerCAmelCase : Any = '' if is_finetuned: __lowerCAmelCase : List[str] = UniSpeechSatForCTC(UpperCamelCase__ ) else: __lowerCAmelCase : Any = UniSpeechSatForPreTraining(UpperCamelCase__ ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __lowerCAmelCase : int = model[0].eval() recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ ) hf_wavavec.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) lowerCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
367
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Optional[int] = ShapEPipeline A_ : str = ['prompt'] A_ : Any = ['prompt'] A_ : List[Any] = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : Optional[int] = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 8 @property def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE ) @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowerCAmelCase : int = PriorTransformer(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Dict = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase : Union[str, Any] = ShapERenderer(**_SCREAMING_SNAKE_CASE ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = self.dummy_prior __lowerCAmelCase : str = self.dummy_text_encoder __lowerCAmelCase : List[Any] = self.dummy_tokenizer __lowerCAmelCase : str = self.dummy_renderer __lowerCAmelCase : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) __lowerCAmelCase : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : Optional[Any] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : Any = 'cpu' __lowerCAmelCase : int = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Optional[Any] = output.images[0] __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCAmelCase : str = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = torch_device == 'cpu' __lowerCAmelCase : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.get_dummy_components() __lowerCAmelCase : List[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = 1 __lowerCAmelCase : List[Any] = 2 __lowerCAmelCase : int = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase : Dict = batch_size * [inputs[key]] __lowerCAmelCase : Any = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __lowerCAmelCase : Dict = ShapEPipeline.from_pretrained('openai/shap-e' ) __lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __lowerCAmelCase : List[str] = pipe( 'a shark' , generator=_SCREAMING_SNAKE_CASE , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
182
0
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
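# A quick trace of the algorithm above: each element is appended to the leftmost
# pile whose top is at least as large, so every pile is non-increasing; reversing
# the piles yields sorted runs that heapq.merge then combines.
print(patience_sort([5, 1, 4, 2]))  # [1, 2, 4, 5]
# piles built along the way: [5, 1] and [4, 2] -> reversed to [1, 5] and [2, 4]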
227
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
227
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { 'configuration_trajectory_transformer': [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrajectoryTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ 'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
365
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
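# A self-contained illustration of the kernel-layout conversion performed by
# replace_params above: TF stores conv kernels as (H, W, in, out) and depthwise
# kernels as (H, W, channels, depth_multiplier), while PyTorch expects
# (out, in, H, W). The shapes below are arbitrary examples.
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 32, 64).astype(np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (64, 32, 3, 3)

tf_dw_kernel = np.random.rand(3, 3, 32, 1).astype(np.float32)
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (32, 1, 3, 3)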
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase_ : Optional[Any] = """""" UpperCAmelCase_ : int = """""" UpperCAmelCase_ : List[str] = """""" UpperCAmelCase_ : Any = """""" def _A (__a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = tweepy.OAuthHandler(__a , __a ) auth.set_access_token(__a , __a ) SCREAMING_SNAKE_CASE_ : Dict = tweepy.API(__a ) # initialize a list to hold all the tweepy Tweets SCREAMING_SNAKE_CASE_ : List[Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) SCREAMING_SNAKE_CASE_ : int = api.user_timeline(screen_name=__a , count=2_00 ) # save most recent tweets alltweets.extend(__a ) # save the id of the oldest tweet less one SCREAMING_SNAKE_CASE_ : Optional[int] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__a ) > 0: print(f'getting tweets before {oldest}' ) # all subsequent requests use the max_id param to prevent duplicates SCREAMING_SNAKE_CASE_ : Dict = api.user_timeline( screen_name=__a , count=2_00 , max_id=__a ) # save most recent tweets alltweets.extend(__a ) # update the id of the oldest tweet less one SCREAMING_SNAKE_CASE_ : List[str] = alltweets[-1].id - 1 print(f'...{len(__a )} tweets downloaded so far' ) # transform the tweepy tweets into a 2D array that will populate the csv SCREAMING_SNAKE_CASE_ : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'new_{screen_name}_tweets.csv' , '''w''' ) as f: SCREAMING_SNAKE_CASE_ : List[Any] = csv.writer(__a ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(__a ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
"""simple docstring""" import re def lowercase_ ( _lowerCamelCase: str ) -> bool: '''simple docstring''' __lowerCamelCase : Union[str, Any] = re.compile( r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" ) return bool(re.search(_lowerCamelCase , _lowerCamelCase ) ) if __name__ == "__main__": __A = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
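# Hedged usage sketch for the config above: it behaves like any PretrainedConfig,
# so keyword overrides replace the defaults and survive serialization.
if __name__ == "__main__":
    van_config = VanConfig(image_size=256, hidden_sizes=[64, 128, 320, 512])
    assert van_config.to_dict()["image_size"] == 256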
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a_ : str = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' a_ : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' a_ : Dict = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache a_ : Dict = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ ) # baseline - just load from_pretrained with normal network a_ : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed a_ : List[str] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a_ : Any = '1' a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched a_ : Tuple = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' a_ : str = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache a_ : Tuple = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) BertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) BertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) pipeline(task='fill-mask' , model=SCREAMING_SNAKE_CASE__ ) # baseline - just load from_pretrained with normal network a_ : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed a_ : Dict = self.get_env() a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too 
late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a_ : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' a_ : Dict = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network a_ : Any = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed a_ : Tuple = self.get_env() a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # next emulate no network a_ : str = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a_ : Dict = '1' a_ : int = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: a_ : Union[str, Any] = '\nfrom transformers import pipeline\n ' a_ : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' a_ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' a_ : Union[str, Any] = self.get_env() a_ : Optional[Any] = '1' a_ : int = [sys.executable, '-c', '\n'.join([load, mock, run] )] a_ : Union[str, Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: a_ : Optional[int] = '\nfrom transformers import AutoModel\n ' a_ : Dict = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network a_ : Tuple = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed a_ : Optional[Any] = self.get_env() a_ : Dict = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a_ : Optional[int] = '1' a_ : Optional[Any] = subprocess.run(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , check=SCREAMING_SNAKE_CASE__ , capture_output=SCREAMING_SNAKE_CASE__ ) 
self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() )
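# The tests above all follow one pattern: offline behaviour can only be toggled
# before `transformers` is imported, so each case runs a Python one-liner in a
# subprocess and flips TRANSFORMERS_OFFLINE in the child environment. A minimal
# sketch of that harness (the helper name is illustrative):
import os
import subprocess
import sys


def run_python_offline(code: str) -> subprocess.CompletedProcess:
    env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
    return subprocess.run([sys.executable, "-c", code], env=env, check=False, capture_output=True)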
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { 'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json', } class A ( UpperCAmelCase_ ): __UpperCAmelCase : Union[str, Any] = 'instructblip_vision_model' def __init__(self : Union[str, Any] , __UpperCAmelCase : Optional[int]=1_4_0_8 , __UpperCAmelCase : List[Any]=6_1_4_4 , __UpperCAmelCase : Optional[int]=3_9 , __UpperCAmelCase : Optional[Any]=1_6 , __UpperCAmelCase : Optional[Any]=2_2_4 , __UpperCAmelCase : Optional[Any]=1_4 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Tuple=1E-6 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Tuple=1E-10 , __UpperCAmelCase : int=True , **__UpperCAmelCase : Union[str, Any] , ) -> Dict: """simple docstring""" super().__init__(**__UpperCAmelCase ) UpperCAmelCase__ = hidden_size UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = patch_size UpperCAmelCase__ = image_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = attention_dropout UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = hidden_act UpperCAmelCase__ = qkv_bias @classmethod def lowercase_ (cls : Union[str, Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Tuple ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": UpperCAmelCase__ = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class A ( UpperCAmelCase_ ): __UpperCAmelCase : List[Any] = 'instructblip_qformer' def __init__(self : str , __UpperCAmelCase : Tuple=3_0_5_2_2 , __UpperCAmelCase : Dict=7_6_8 , __UpperCAmelCase : Any=1_2 , __UpperCAmelCase : List[Any]=1_2 , __UpperCAmelCase : Any=3_0_7_2 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=5_1_2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[str]=1E-12 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]="absolute" , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : Any=1_4_0_8 , **__UpperCAmelCase : List[Any] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = hidden_act UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = position_embedding_type UpperCAmelCase__ = cross_attention_frequency UpperCAmelCase__ = encoder_hidden_size @classmethod def lowercase_ (cls : Optional[Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Dict ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": UpperCAmelCase__ = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class A ( UpperCAmelCase_ ): __UpperCAmelCase : Dict = 'instructblip' __UpperCAmelCase : Optional[int] = True def __init__(self : Tuple , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=3_2 , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" super().__init__(**__UpperCAmelCase ) if vision_config is None: UpperCAmelCase__ = {} logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." ) if qformer_config is None: UpperCAmelCase__ = {} logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." ) if text_config is None: UpperCAmelCase__ = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." 
) UpperCAmelCase__ = InstructBlipVisionConfig(**__UpperCAmelCase ) UpperCAmelCase__ = InstructBlipQFormerConfig(**__UpperCAmelCase ) UpperCAmelCase__ = text_config["model_type"] if "model_type" in text_config else "opt" UpperCAmelCase__ = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase ) UpperCAmelCase__ = self.text_config.tie_word_embeddings UpperCAmelCase__ = self.text_config.is_encoder_decoder UpperCAmelCase__ = num_query_tokens UpperCAmelCase__ = self.vision_config.hidden_size UpperCAmelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES UpperCAmelCase__ = 1.0 UpperCAmelCase__ = 0.02 @classmethod def lowercase_ (cls : List[Any] , __UpperCAmelCase : InstructBlipVisionConfig , __UpperCAmelCase : InstructBlipQFormerConfig , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : List[Any] , ) -> Tuple: """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCAmelCase , ) def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ = self.vision_config.to_dict() UpperCAmelCase__ = self.qformer_config.to_dict() UpperCAmelCase__ = self.text_config.to_dict() UpperCAmelCase__ = self.__class__.model_type return output
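# Hedged usage sketch for the composite config above, using the public
# transformers class names that the call sites in __init__ reference; the
# default sub-config values are used and OPT is picked as the text backbone.
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig

composite_config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
)
assert composite_config.num_query_tokens == 32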
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. lowerCamelCase__ : Any = 10 def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[int], _lowerCAmelCase : int ) -> int: for i in range(_lowerCAmelCase, _lowerCAmelCase ): if array[i] == target: return i return -1 def UpperCamelCase ( _lowerCAmelCase : list[int], _lowerCAmelCase : int ) -> int: _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : Optional[int] = len(_lowerCAmelCase ) while left <= right: if right - left < precision: return lin_search(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _UpperCAmelCase : str = (left + right) // 3 + 1 _UpperCAmelCase : int = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: _UpperCAmelCase : Tuple = one_third - 1 elif array[two_third] < target: _UpperCAmelCase : Any = two_third + 1 else: _UpperCAmelCase : Any = one_third + 1 _UpperCAmelCase : Dict = two_third - 1 else: return -1 def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[int], _lowerCAmelCase : int ) -> int: if left < right: if right - left < precision: return lin_search(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = (left + right) // 3 + 1 _UpperCAmelCase : List[Any] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(_lowerCAmelCase, one_third - 1, _lowerCAmelCase, _lowerCAmelCase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) else: return rec_ternary_search(one_third + 1, two_third - 1, _lowerCAmelCase, _lowerCAmelCase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip() lowerCamelCase__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), F"List must be ordered.\n{collection}." lowerCamelCase__ : List[Any] = int(input('''Enter the number to be found in the list:\n''').strip()) lowerCamelCase__ : str = ite_ternary_search(collection, target) lowerCamelCase__ : List[Any] = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F'''Iterative search: {target} found at positions: {resulta}''') print(F'''Recursive search: {target} found at positions: {resulta}''') else: print('''Not found''')
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
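# Hedged sketch of how the mixin above is consumed: a concrete test case supplies
# `feature_extraction_class` and `feat_extract_dict` (attribute names match what
# the mixin reads; the Wav2Vec2 values here are illustrative defaults).
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2FeatureExtractionSavingTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}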
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCamelCase = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def lowerCamelCase_ ( _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , ): """simple docstring""" if attention_mask is None: lowerCAmelCase__ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase__ : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase__ : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase__ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase__ : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _a : def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=13 , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : List[Any]=99 , _SCREAMING_SNAKE_CASE : List[Any]=16 , _SCREAMING_SNAKE_CASE : Dict=2 , _SCREAMING_SNAKE_CASE : List[str]=4 , _SCREAMING_SNAKE_CASE : Union[str, Any]=4 , _SCREAMING_SNAKE_CASE : Any="gelu" , _SCREAMING_SNAKE_CASE : str=0.1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE : str=32 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : str=1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0 , _SCREAMING_SNAKE_CASE : List[str]=0.02 , )-> Any: lowerCAmelCase__ : Any = parent lowerCAmelCase__ : Dict = batch_size lowerCAmelCase__ : Any = seq_length lowerCAmelCase__ : Union[str, Any] = is_training lowerCAmelCase__ : Optional[Any] = use_labels lowerCAmelCase__ : List[str] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : Optional[Any] = intermediate_size lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : Dict = max_position_embeddings lowerCAmelCase__ : int = eos_token_id lowerCAmelCase__ : Dict = pad_token_id lowerCAmelCase__ : Optional[Any] = bos_token_id lowerCAmelCase__ : str = initializer_range def UpperCAmelCase__( self : List[str] )-> Any: lowerCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase__ : List[Any] = 
np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowerCAmelCase__ : List[str] = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2 ) lowerCAmelCase__ : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Optional[Any] = prepare_blenderbot_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return config, inputs_dict def UpperCAmelCase__( self : List[str] )-> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] )-> str: lowerCAmelCase__ : str = 20 lowerCAmelCase__ : Dict = model_class_name(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase__ : str = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase__ : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase__ : Dict = model.decode( decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase__ : Tuple = model.decode( decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : str = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int )-> Tuple: lowerCAmelCase__ : int = 20 lowerCAmelCase__ : Tuple = model_class_name(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Any = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase__ , lowerCAmelCase__ : Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase__ : Optional[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) 
lowerCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase__ : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase__ : str = model.decode( decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase__ : Union[str, Any] = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class _a ( unittest.TestCase): _a : Optional[int] = 99 def UpperCAmelCase__( self : int )-> Tuple: lowerCAmelCase__ : Any = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase__ : Optional[Any] = input_ids.shape[0] lowerCAmelCase__ : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def UpperCAmelCase__( self : List[str] )-> Any: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_config_and_data() lowerCAmelCase__ : Dict = FlaxBlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Dict = lm_model(input_ids=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : str )-> Any: lowerCAmelCase__ : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCAmelCase__ : Dict = FlaxBlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Any = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowerCAmelCase__ : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase__ : str = lm_model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : int )-> Dict: lowerCAmelCase__ : Union[str, Any] = 
np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) lowerCAmelCase__ : int = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2 ) lowerCAmelCase__ : int = np.equal(_SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum() lowerCAmelCase__ : int = np.equal(_SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_SCREAMING_SNAKE_CASE , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _a ( _lowercase , unittest.TestCase , _lowercase): _a : Optional[int] = True _a : List[str] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _a : Dict = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def UpperCAmelCase__( self : Optional[Any] )-> Any: lowerCAmelCase__ : int = FlaxBlenderbotModelTester(self ) def UpperCAmelCase__( self : int )-> Optional[Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : List[Any] )-> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : Tuple )-> Any: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : int = model_class(_SCREAMING_SNAKE_CASE ) @jax.jit def encode_jitted(_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : List[Any] ): return model.encode(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ : str = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ : Any = encode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase__( self : Optional[Any] )-> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : Dict = model_class(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Any = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase__ : str = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): return model.decode( decoder_input_ids=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , encoder_outputs=_SCREAMING_SNAKE_CASE , ) with 
self.subTest('''JIT Enabled''' ): lowerCAmelCase__ : List[Any] = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ : Optional[Any] = decode_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCAmelCase__( self : Dict )-> List[str]: for model_class_name in self.all_model_classes: lowerCAmelCase__ : List[Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase__ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase__ : Dict = model(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def UpperCAmelCase__( self : List[Any] )-> Union[str, Any]: lowerCAmelCase__ : Optional[int] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25} lowerCAmelCase__ : Tuple = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowerCAmelCase__ : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Tuple = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowerCAmelCase__ : List[Any] = ['''Sam'''] lowerCAmelCase__ : Dict = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''jax''' ) lowerCAmelCase__ : List[str] = model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' lowerCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) assert generated_txt[0].strip() == tgt_text
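# Illustration of the shift_tokens_right helper exercised in the tests above
# (assuming flax is available so the import at the top of this file ran): the
# decoder input is the target sequence shifted one position to the right, with
# the decoder start token (2) inserted at position 0.
import numpy as np

_example = np.array([[71, 82, 18, 2]], dtype=np.int64)
_shifted = shift_tokens_right(_example, 1, 2)
assert (np.asarray(_shifted)[:, 0] == 2).all()  # every row now begins with the start token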
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map an audiocraft (fairseq-style) parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys, split the fused QKV projection, and separate out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
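For orientation, a minimal usage sketch of the converter above; the script filename and output directory are placeholders, not part of the original:

# Minimal usage sketch (filename and output path are placeholders):
#
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu
#
# or, if the functions above are importable, directly from Python:
convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small", device="cpu")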
354
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of ``n`` as a list, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # whatever remains after dividing out all factors <= sqrt(n) is itself prime
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
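A quick sanity check of the factorisation helper above; the `prime_factors` name follows the cleaned-up version:

# Usage sketch for the helper above.
assert prime_factors(12) == [2, 2, 3]
assert prime_factors(97) == [97]  # a prime factors to itself
assert prime_factors(2 * 3 * 5 * 7) == [2, 3, 5, 7]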
311
0
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( lowercase_ , unittest.TestCase ): __lowerCamelCase : List[Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def UpperCAmelCase__ ( self , snake_case__=0 ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =floats_tensor((1, 3, 128, 128) , rng=random.Random(__UpperCamelCase ) ) UpperCAmelCase : Optional[int] =np.random.RandomState(__UpperCamelCase ) UpperCAmelCase : int ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[int] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : List[str] =self.get_dummy_inputs() UpperCAmelCase : int =pipe(**__UpperCamelCase ).images UpperCAmelCase : Tuple =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Optional[int] =np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) UpperCAmelCase : List[Any] =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : Optional[int] =self.get_dummy_inputs() UpperCAmelCase : str =pipe(**__UpperCamelCase ).images UpperCAmelCase : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Union[str, Any] =np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) UpperCAmelCase : Dict =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations UpperCAmelCase : Dict =pipe(**self.get_dummy_inputs() ) UpperCAmelCase : int =self.get_dummy_inputs() UpperCAmelCase : List[str] =pipe(**__UpperCamelCase ).images UpperCAmelCase : int =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Union[str, Any] =np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : 
Optional[int] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) UpperCAmelCase : List[str] =EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : List[str] =self.get_dummy_inputs() UpperCAmelCase : Any =pipe(**__UpperCamelCase ).images UpperCAmelCase : Union[str, Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Dict =np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) UpperCAmelCase : str =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : List[str] =self.get_dummy_inputs() UpperCAmelCase : List[str] =pipe(**__UpperCamelCase ).images UpperCAmelCase : List[str] =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Union[str, Any] =np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : List[Any] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) UpperCAmelCase : Optional[Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : Union[str, Any] =self.get_dummy_inputs() UpperCAmelCase : int =pipe(**__UpperCamelCase ).images UpperCAmelCase : str =image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase : Any =np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase ): @property def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[str] =ort.SessionOptions() UpperCAmelCase : Optional[Any] =False return options def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) UpperCAmelCase : int =init_image.resize((768, 512) ) # using the PNDM scheduler by default UpperCAmelCase : Optional[int] =OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : str ='''A fantasy landscape, trending on artstation''' UpperCAmelCase : Optional[int] =np.random.RandomState(0 ) UpperCAmelCase : Union[str, Any] =pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , 
num_inference_steps=10 , generator=__UpperCamelCase , output_type='''np''' , ) UpperCAmelCase : int =output.images UpperCAmelCase : List[str] =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase : str =np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase : Optional[Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) UpperCAmelCase : Optional[Any] =init_image.resize((768, 512) ) UpperCAmelCase : List[str] =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) UpperCAmelCase : Dict =OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) UpperCAmelCase : List[str] ='''A fantasy landscape, trending on artstation''' UpperCAmelCase : List[Any] =np.random.RandomState(0 ) UpperCAmelCase : Optional[int] =pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type='''np''' , ) UpperCAmelCase : Tuple =output.images UpperCAmelCase : str =images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase : List[Any] =np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
348
"""simple docstring""" _snake_case : Optional[int] = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
292
0
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets __A ='''\ @inproceedings{wang2019glue, title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019} } ''' __A ='''\ GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, evaluating, and analyzing natural language understanding systems. ''' __A =''' Compute GLUE evaluation metric associated to each GLUE dataset. Args: predictions: list of predictions to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. Returns: depending on the GLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "pearson": Pearson Correlation "spearmanr": Spearman Correlation "matthews_correlation": Matthew Correlation Examples: >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\' >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\') >>> references = [0., 1., 2., 3., 4., 5.] >>> predictions = [0., 1., 2., 3., 4., 5.] 
>>> results = glue_metric.compute(predictions=predictions, references=references) >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)}) {\'pearson\': 1.0, \'spearmanr\': 1.0} >>> glue_metric = datasets.load_metric(\'glue\', \'cola\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): return float((preds == labels).mean() ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = float(pearsonr(lowerCamelCase__ , lowerCamelCase__ )[0] ) lowerCamelCase_ = float(spearmanr(lowerCamelCase__ , lowerCamelCase__ )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def SCREAMING_SNAKE_CASE_( self ) -> Dict: if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( "You should supply a configuration name selected in " "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", " "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ), "references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Dict: if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )} elif self.config_name == "stsb": return pearson_and_spearman(lowercase , lowercase ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(lowercase , lowercase ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(lowercase , lowercase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", " "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
47
import copy import re class _SCREAMING_SNAKE_CASE : lowerCAmelCase__ = 'hp' lowerCAmelCase__ = {} lowerCAmelCase__ = None @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase , lowercase ) -> Tuple: lowerCamelCase_ = prefix lowerCamelCase_ = defaults cls.build_naming_info() @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: if len(lowercase ) == 0: return "" lowerCamelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowercase ) + 1 ): lowerCamelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: lowerCamelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowercase ): lowerCamelCase_ = "" while integer != 0: lowerCamelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s lowerCamelCase_ = 0 while True: lowerCamelCase_ = word + "#" + int_to_alphabetic(lowercase ) if sword in info["reverse_short_word"]: continue else: lowerCamelCase_ = sword break lowerCamelCase_ = short_word lowerCamelCase_ = word return short_word @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> int: lowerCamelCase_ = param_name.split("_" ) lowerCamelCase_ = [TrialShortNamer.shortname_for_word(lowercase , lowercase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name lowerCamelCase_ = ["", "_"] for separator in separators: lowerCamelCase_ = separator.join(lowercase ) if shortname not in info["reverse_short_param"]: lowerCamelCase_ = shortname lowerCamelCase_ = param_name return shortname return param_name @staticmethod def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Optional[Any]: lowerCamelCase_ = TrialShortNamer.shortname_for_key(lowercase , lowercase ) lowerCamelCase_ = short_name lowerCamelCase_ = param_name @classmethod def SCREAMING_SNAKE_CASE_( cls ) -> Dict: if cls.NAMING_INFO is not None: return lowerCamelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } lowerCamelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowercase , lowercase ) lowerCamelCase_ = info @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> Optional[int]: cls.build_naming_info() assert cls.PREFIX is not None lowerCamelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue lowerCamelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(lowercase , lowercase ): lowerCamelCase_ = 1 if v else 0 lowerCamelCase_ = "" if isinstance(lowercase , (int, float) ) else "-" lowerCamelCase_ = f'{key}{sep}{v}' name.append(lowercase ) return "_".join(lowercase ) @classmethod def SCREAMING_SNAKE_CASE_( cls , lowercase ) -> List[Any]: lowerCamelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": lowerCamelCase_ = [] else: lowerCamelCase_ = repr.split("_" ) lowerCamelCase_ = {} for value in values: if "-" in value: lowerCamelCase_ , lowerCamelCase_ = value.split("-" ) else: lowerCamelCase_ = re.sub("[0-9.]" , "" , lowercase ) lowerCamelCase_ = float(re.sub("[^0-9.]" , "" , lowercase ) ) lowerCamelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] 
lowerCamelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: lowerCamelCase_ = cls.DEFAULTS[k] return parameters
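A hypothetical usage sketch for the short-namer above, assuming the obfuscated classmethods correspond to transformers' `TrialShortNamer.set_defaults`, `.shortname`, and `.parse_repr`; the import path is an assumption as well:

# Hypothetical usage; import path and method names are assumptions based on
# transformers' TrialShortNamer (transformers/utils/hp_naming.py).
from transformers.utils.hp_naming import TrialShortNamer

TrialShortNamer.set_defaults("hp", {"learning_rate": 5e-5, "warmup_steps": 0})

# Only parameters that differ from the defaults appear in the short name.
name = TrialShortNamer.shortname({"learning_rate": 0.003, "warmup_steps": 100})
print(name)  # "hp_lr0.003_ws100"; abbreviations are derived at runtime, not hard-coded

# parse_repr round-trips the short name back into a parameter dict (numeric values come back as floats)
print(TrialShortNamer.parse_repr(name))  # {'learning_rate': 0.003, 'warmup_steps': 100.0}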
47
1
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[int] = ["pixel_values"] def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = IMAGENET_DEFAULT_MEAN , __A = IMAGENET_DEFAULT_STD , **__A , ) -> None: super().__init__(**__A ) lowerCAmelCase_ :Optional[int] = size if size is not None else {"""shortest_edge""": 224} lowerCAmelCase_ :Optional[Any] = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCAmelCase_ :Dict = get_size_dict(__A , param_name="""crop_size""" ) lowerCAmelCase_ :int = do_resize lowerCAmelCase_ :Optional[Any] = size lowerCAmelCase_ :str = resample lowerCAmelCase_ :str = do_center_crop lowerCAmelCase_ :List[Any] = crop_size lowerCAmelCase_ :Optional[Any] = do_rescale lowerCAmelCase_ :Optional[int] = rescale_factor lowerCAmelCase_ :Any = do_normalize lowerCAmelCase_ :List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCAmelCase_ :Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray: lowerCAmelCase_ :Any = get_size_dict(__A , default_to_square=__A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCAmelCase_ :Optional[Any] = int((256 / 224) * size["""shortest_edge"""] ) lowerCAmelCase_ :Tuple = get_resize_output_image_size(__A , size=__A , default_to_square=__A ) lowerCAmelCase_ :str = {"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( __A , size=(size_dict["""height"""], size_dict["""width"""]) , resample=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: lowerCAmelCase_ :Any = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: return rescale(__A , scale=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature: lowerCAmelCase_ :Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ :Union[str, Any] = resample if resample is not None else self.resample lowerCAmelCase_ :Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ :str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ :str = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ :Any = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ :List[Any] = image_std if image_std is not None else self.image_std lowerCAmelCase_ :List[Any] = size if size is not None else self.size lowerCAmelCase_ :str = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :Optional[int] = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ :int = get_size_dict(__A , param_name="""crop_size""" ) lowerCAmelCase_ :Optional[Any] = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCAmelCase_ :Any = [to_numpy_array(__A ) for image in images] if do_resize: lowerCAmelCase_ :Tuple = [self.resize(__A , __A , __A ) for image in images] if do_center_crop: lowerCAmelCase_ :int = [self.center_crop(__A , __A ) for image in images] if do_rescale: lowerCAmelCase_ :Optional[int] = [self.rescale(__A , __A ) for image in images] if do_normalize: lowerCAmelCase_ :List[str] = [self.normalize(__A , __A , __A ) for image in images] lowerCAmelCase_ :Tuple = [to_channel_dimension_format(__A , __A ) for image in images] lowerCAmelCase_ :List[str] = {"""pixel_values""": images} return BatchFeature(data=__A , tensor_type=__A )
84
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Optional[Any] ) -> str: '''simple docstring''' lowerCAmelCase_ :str = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" ) if "norm" in key: lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" ) if "layer_norm1" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )] lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" ) if "attn.q" in key: lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" ) if "bot_conv" in key: lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCAmelCase_ :Any = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCAmelCase_ :List[Any] = value return new_state_dict def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCAmelCase_ :Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase_ :List[Any] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :] def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int: '''simple docstring''' lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor() # prepare image lowerCAmelCase_ :List[Any] = prepare_img() lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) ) # rename keys lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass lowerCAmelCase_ :Dict = model(lowercase__ ) lowerCAmelCase_ :Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase_ :Optional[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowerCAmelCase_ :Any = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , ) if __name__ == "__main__": __UpperCAmelCase = 
argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __UpperCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
84
1
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowercase : Dict = imread(r"digital_image_processing/image_data/lena_small.jpg") lowercase : Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: _snake_case = cn.convert_to_negative(_UpperCamelCase ) # assert negative_img array for at least one True assert negative_img.any() def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: _snake_case = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def SCREAMING_SNAKE_CASE__ ( ) -> Any: _snake_case = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() _snake_case = canny.canny(_UpperCamelCase ) # assert canny array for at least one True assert canny_array.any() def SCREAMING_SNAKE_CASE__ ( ) -> Any: assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all() def SCREAMING_SNAKE_CASE__ ( ) -> int: _snake_case = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] ) _snake_case = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase ) assert res.any() def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: assert med.median_filter(_UpperCamelCase , 3 ).any() def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: _snake_case , _snake_case = sob.sobel_filter(_UpperCamelCase ) assert grad.any() and theta.any() def SCREAMING_SNAKE_CASE__ ( ) -> Dict: _snake_case = sp.make_sepia(_UpperCamelCase , 20 ) assert sepia.all() def SCREAMING_SNAKE_CASE__ ( __A = "digital_image_processing/image_data/lena_small.jpg" ) -> str: _snake_case = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def SCREAMING_SNAKE_CASE__ ( __A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Dict: _snake_case = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: _snake_case = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
_snake_case = imread(_UpperCamelCase , 0 ) # Test for get_neighbors_pixel function() return not None _snake_case = 0 _snake_case = 0 _snake_case = image[x_coordinate][y_coordinate] _snake_case = lbp.get_neighbors_pixel( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image _snake_case = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): _snake_case = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) assert lbp_image.any()
359
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __UpperCAmelCase : @staticmethod def lowerCamelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( __A ) -> str: _snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict: _snake_case = np.array(__A ) _snake_case = npimg.shape return {"hash": hashimage(__A ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __UpperCAmelCase ( unittest.TestCase ): __lowercase = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __lowercase = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case = MaskGenerationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" pass @require_tf @unittest.skip('Image segmentation not implemented in TF' ) def lowerCamelCase ( self ): """simple docstring""" pass @slow @require_torch def lowerCamelCase ( self ): """simple docstring""" _snake_case = pipeline('mask-generation' , model='facebook/sam-vit-huge' ) _snake_case = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_56 ) # Shortening by hashing _snake_case = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(lowerCAmelCase_ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879}, {'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499}, 
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943}, {'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = 'facebook/sam-vit-huge' _snake_case = pipeline('mask-generation' , model=lowerCAmelCase_ ) _snake_case = image_segmenter( 'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_56 ) # Shortening by hashing _snake_case = [] for i, o in enumerate(outputs['masks'] ): new_outupt += [{"mask": mask_to_test_readable(lowerCAmelCase_ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [ {'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210}, {'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053}, ] , )
160
0
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCamelCase : int = False class A__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class A__ ( unittest.TestCase ): def A ( self : List[Any] ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[str] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe.dual_guided( prompt='first prompt' , image=_a , text_to_image_strength=0.75 , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) _SCREAMING_SNAKE_CASE =VersatileDiffusionPipeline.from_pretrained(_a , torch_dtype=torch.floataa ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE =generator.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe.dual_guided( prompt='first prompt' , image=_a , text_to_image_strength=0.75 , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def A ( self : int ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE ='cyberpunk 2077' _SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe.dual_guided( prompt=_a , image=_a , text_to_image_strength=0.75 , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _SCREAMING_SNAKE_CASE =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger ' _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe.text_to_image( prompt=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images _SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _SCREAMING_SNAKE_CASE =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _SCREAMING_SNAKE_CASE =pipe.image_variation(_a , generator=_a , output_type='numpy' ).images _SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _SCREAMING_SNAKE_CASE =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
47
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowerCamelCase : Optional[int] = False class A__ ( unittest.TestCase ): pass @slow @require_torch_gpu class A__ ( unittest.TestCase ): def A ( self : Tuple ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe( image=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _SCREAMING_SNAKE_CASE =np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
47
1
"""simple docstring""" def lowercase ( a__ : list[list[int]] , a__ : int , a__ : int , a__ : list[int] ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowercase ( a__ : list[list[int]] , a__ : list[int] , a__ : int ) -> bool: # Base Case if curr_ind == len(a__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(a__ ) ): if valid_connection(a__ , a__ , a__ , a__ ): # Insert current vertex into path as next transition _UpperCamelCase = next_ver # Validate created path if util_hamilton_cycle(a__ , a__ , curr_ind + 1 ): return True # Backtrack _UpperCamelCase = -1 return False def lowercase ( a__ : list[list[int]] , a__ : int = 0 ) -> list[int]: _UpperCamelCase = [-1] * (len(a__ ) + 1) # initialize start and end of path with starting index _UpperCamelCase = _UpperCamelCase = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(a__ , a__ , 1 ) else []
368
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( _lowercase): snake_case__ = ['''image_processor''', '''tokenizer'''] snake_case__ = '''BlipImageProcessor''' snake_case__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ) -> int: _UpperCamelCase = False super().__init__(__UpperCamelCase , __UpperCamelCase ) _UpperCamelCase = self.image_processor def __call__( self : Any , __UpperCamelCase : ImageInput = None , __UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : List[str] , ) -> BatchEncoding: if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: _UpperCamelCase = self.tokenizer _UpperCamelCase = self.tokenizer( text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) return text_encoding # add pixel_values _UpperCamelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase ) if text is not None: _UpperCamelCase = self.tokenizer( text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) else: _UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__UpperCamelCase ) return encoding_image_processor def _UpperCamelCase ( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def _UpperCamelCase ( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ) -> str: return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def 
_UpperCamelCase ( self : List[str] ) -> Dict: _UpperCamelCase = self.tokenizer.model_input_names _UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
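A hypothetical usage sketch for the processor above, assuming it corresponds to transformers' `BlipProcessor`; the checkpoint name and image URL are examples, not taken from the original:

# Hypothetical usage, assuming the class above is transformers' BlipProcessor.
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")  # example checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(inputs.keys())  # pixel_values plus the tokenizer outputs (input_ids, attention_mask)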
54
0
'''simple docstring''' import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase_ ( a__ , unittest.TestCase ): __UpperCAmelCase = CLIPTokenizer __UpperCAmelCase = CLIPTokenizerFast __UpperCAmelCase = True __UpperCAmelCase = {} __UpperCAmelCase = False def __a ( self ): super().setUp() # fmt: off UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) ) UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] UpperCamelCase__ = {"unk_token": "<unk>"} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(a ) ) def __a ( self , **a ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **a ) def __a ( self , **a ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a ) def __a ( self , a ): UpperCamelCase__ = "lower newer" UpperCamelCase__ = "lower newer" return input_text, output_text def __a ( self ): UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = "lower newer" UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] UpperCamelCase__ = tokenizer.tokenize(a ) self.assertListEqual(a , a ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) @require_ftfy def __a ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase__ = self.tokenizer_class.from_pretrained(a , **a ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a , **a ) UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
UpperCamelCase__ = tokenizer_s.tokenize(a ) UpperCamelCase__ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y" UpperCamelCase__ = tokenizer_s.tokenize(a ) UpperCamelCase__ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) # Test that the tokenization is identical on unicode of space type UpperCamelCase__ = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: UpperCamelCase__ = tokenizer_s.tokenize(a ) UpperCamelCase__ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) # Test that the tokenization is identical on unicode of line break type UpperCamelCase__ = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: UpperCamelCase__ = tokenizer_s.tokenize(a ) UpperCamelCase__ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) def __a ( self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = f'''{text_of_1_token} {text_of_1_token}''' UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , ) UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , ) UpperCamelCase__ = f''' {text}''' UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , ) UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , ) def __a ( self ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. with self.assertRaises(a ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def __a ( self ): super().test_tokenization_python_rust_equals() def __a ( self ): # CLIP always lower cases letters pass
80
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs); the entry-point name follows the usual convention,
    # the original name was lost in extraction.
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase: Optional[int] = logging.get_logger(__name__) UpperCAmelCase: str = { """SCUT-DLVCLab/lilt-roberta-en-base""": ( """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json""" ), } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "lilt" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=None ,UpperCAmelCase_=4 ,UpperCAmelCase_=10_24 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = vocab_size _lowercase : Tuple = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Any = hidden_act _lowercase : Dict = intermediate_size _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Any = attention_probs_dropout_prob _lowercase : str = max_position_embeddings _lowercase : Tuple = type_vocab_size _lowercase : Tuple = initializer_range _lowercase : Tuple = layer_norm_eps _lowercase : List[Any] = position_embedding_type _lowercase : int = classifier_dropout _lowercase : str = channel_shrink_ratio _lowercase : List[str] = max_ad_position_embeddings
367
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
336
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Dimensions consumed by the linear projection in the model below.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


# The model class name was mangled in extraction; "MultilingualCLIP" follows the
# upstream M-CLIP project and is a reconstruction.
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mask-weighted mean pooling over token embeddings, then a projection
        # into the shared CLIP embedding space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
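A hedged usage sketch for the classes above; the checkpoint name is an assumption for illustration and may not load without the matching remote code.

from transformers import AutoTokenizer

# "M-CLIP/XLM-Roberta-Large-Vit-B-32" is assumed here, not taken from the sample.
tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
batch = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
projected, token_embeddings = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (1, numDims)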
50
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() @dataclass class a_ : '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = field(default_factory=snake_case_ ) UpperCamelCase = field(default_factory=snake_case_ ) def snake_case_( self , A , A , A ) -> Optional[int]: _SCREAMING_SNAKE_CASE = len(list(m.modules() ) ) == 1 or isinstance(A , nn.Convad ) or isinstance(A , nn.BatchNormad ) if has_not_submodules: self.traced.append(A ) def __call__( self , A ) -> str: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(A ) [x.remove() for x in self.handles] return self @property def snake_case_( self ) -> str: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class a_ : '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 0 UpperCamelCase = field(default_factory=snake_case_ ) UpperCamelCase = field(default_factory=snake_case_ ) def __call__( self , A ) -> List[str]: _SCREAMING_SNAKE_CASE = Tracker(self.dest )(A ).parametrized _SCREAMING_SNAKE_CASE = Tracker(self.src )(A ).parametrized _SCREAMING_SNAKE_CASE = list(filter(lambda A : type(A ) not in self.src_skip , A ) ) _SCREAMING_SNAKE_CASE = list(filter(lambda A : type(A ) not in self.dest_skip , A ) ) if len(A ) != len(A ): raise Exception( f'Numbers of operations are different. Source module has {len(A )} operations while' f' destination module has {len(A )}.' ) for dest_m, src_m in zip(A , A ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : ResNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True ) ->int: print(F'Converting {name}...' ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ).eval() _SCREAMING_SNAKE_CASE = ResNetForImageClassification(__lowerCamelCase ).eval() _SCREAMING_SNAKE_CASE = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = torch.randn((1, 3, 224, 224) ) module_transfer(__lowerCamelCase ) assert torch.allclose(from_model(__lowerCamelCase ) , our_model(__lowerCamelCase ).logits ), "The model logits don't match the original one." 
_SCREAMING_SNAKE_CASE = F'resnet{"-".join(name.split("resnet" ) )}' print(__lowerCamelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__lowerCamelCase , ) # we can use the convnext one _SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__lowerCamelCase , ) print(F'Pushed {checkpoint_name}' ) def lowerCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ) ->Any: _SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" _SCREAMING_SNAKE_CASE = 1000 _SCREAMING_SNAKE_CASE = (1, num_labels) _SCREAMING_SNAKE_CASE = """huggingface/label-files""" _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) _SCREAMING_SNAKE_CASE = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = { """resnet18""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ), """resnet26""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ), """resnet34""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ), """resnet50""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ), """resnet101""": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ), """resnet152""": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ), } if model_name: convert_weight_and_push(__lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
58
0
'''simple docstring'''


def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    0/1 knapsack by plain recursion: the maximum value achievable with the
    items from `index` onward, given `max_weight` remaining capacity.
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
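A quick worked call of the knapsack function above; the weights and values are made up for illustration.

weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
# Capacity 7: best is the pair weighing 3 + 4, worth 4 + 5 = 9.
print(knapsack(weights, values, len(weights), 7, 0))  # 9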
352
'''simple docstring'''

import os
import time

import numpy as np
import onnxruntime as ort


# The three environment-variable names were lost in extraction (only the values
# "1", "0" and "1" survive). The TensorRT INT8 settings below are a reconstruction
# from the upstream quantization benchmark, not something this sample states.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
274
0
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters UpperCAmelCase__ : int = False UpperCAmelCase__ : List[Any] = False def lowerCamelCase__ ( a ) -> List[Any]: return TrainCommand(a ) class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @staticmethod def __magic_name__ ( lowerCAmelCase_ : ArgumentParser ): """simple docstring""" _A: str = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=lowerCAmelCase_ , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=lowerCAmelCase_ , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=lowerCAmelCase_ , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=lowerCAmelCase_ , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=lowerCAmelCase_ , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=lowerCAmelCase_ , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=lowerCAmelCase_ , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=lowerCAmelCase_ , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=lowerCAmelCase_ , default=3_2 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=lowerCAmelCase_ , default=6_4 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=lowerCAmelCase_ , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=lowerCAmelCase_ , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self : Union[str, Any] , lowerCAmelCase_ : Namespace ): """simple docstring""" _A: Optional[Any] = logging.get_logger('''transformers-cli/training''' ) _A: Tuple = '''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=lowerCAmelCase_ ) _A: Tuple = args.output _A: str = args.column_label _A: Optional[int] = args.column_text _A: Any = args.column_id self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": _A: Optional[int] = TextClassificationPipeline.from_pretrained(args.model ) elif 
args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"""Loading dataset from {args.train_data}""" ) _A: Optional[Any] = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _A: int = None if args.validation_data: self.logger.info(F"""Loading validation dataset from {args.validation_data}""" ) _A: List[str] = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _A: str = args.validation_split _A: Optional[Any] = args.train_batch_size _A: Tuple = args.valid_batch_size _A: List[Any] = args.learning_rate _A: str = args.adam_epsilon def __magic_name__ ( self : Dict ): """simple docstring""" if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError def __magic_name__ ( self : int ): """simple docstring""" self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
121
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that, by default, shows the progress bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar everywhere except local process 0; the extracted
        # version compared with `== 0`, which inverted the logic.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
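A short usage sketch for the wrapper above. Note that with this signature the first positional argument is `main_process_only`; the loop body is a stand-in.

for step in tqdm(True, range(100), desc="steps"):
    pass  # the bar renders only on local process 0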
121
1
"""simple docstring""" def __lowerCamelCase ( a_ : int ) -> int: if not isinstance(a_ , a_ ): raise ValueError('''multiplicative_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''multiplicative_persistence() does not accept negative values''' ) __SCREAMING_SNAKE_CASE :Tuple = 0 __SCREAMING_SNAKE_CASE :Union[str, Any] = str(a_ ) while len(a_ ) != 1: __SCREAMING_SNAKE_CASE :Tuple = [int(a_ ) for i in num_string] __SCREAMING_SNAKE_CASE :List[Any] = 1 for i in range(0 , len(a_ ) ): total *= numbers[i] __SCREAMING_SNAKE_CASE :List[Any] = str(a_ ) steps += 1 return steps def __lowerCamelCase ( a_ : int ) -> int: if not isinstance(a_ , a_ ): raise ValueError('''additive_persistence() only accepts integral values''' ) if num < 0: raise ValueError('''additive_persistence() does not accept negative values''' ) __SCREAMING_SNAKE_CASE :int = 0 __SCREAMING_SNAKE_CASE :Tuple = str(a_ ) while len(a_ ) != 1: __SCREAMING_SNAKE_CASE :Any = [int(a_ ) for i in num_string] __SCREAMING_SNAKE_CASE :int = 0 for i in range(0 , len(a_ ) ): total += numbers[i] __SCREAMING_SNAKE_CASE :Union[str, Any] = str(a_ ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
239
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class _SCREAMING_SNAKE_CASE( A ): def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,) -> Optional[int]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE :Any = value_function __SCREAMING_SNAKE_CASE :List[str] = unet __SCREAMING_SNAKE_CASE :int = scheduler __SCREAMING_SNAKE_CASE :Optional[int] = env __SCREAMING_SNAKE_CASE :Optional[int] = env.get_dataset() __SCREAMING_SNAKE_CASE :str = {} for key in self.data.keys(): try: __SCREAMING_SNAKE_CASE :Optional[Any] = self.data[key].mean() except: # noqa: E722 pass __SCREAMING_SNAKE_CASE :Dict = {} for key in self.data.keys(): try: __SCREAMING_SNAKE_CASE :str = self.data[key].std() except: # noqa: E722 pass __SCREAMING_SNAKE_CASE :Optional[int] = env.observation_space.shape[0] __SCREAMING_SNAKE_CASE :int = env.action_space.shape[0] def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" return x_in * self.stds[key] + self.means[key] def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" if type(SCREAMING_SNAKE_CASE__ ) is dict: return {k: self.to_torch(SCREAMING_SNAKE_CASE__ ) for k, v in x_in.items()} elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ): return x_in.to(self.unet.device ) return torch.tensor(SCREAMING_SNAKE_CASE__ ,device=self.unet.device ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" for key, val in cond.items(): __SCREAMING_SNAKE_CASE :Dict = val.clone() return x_in def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE :Union[str, Any] = x.shape[0] __SCREAMING_SNAKE_CASE :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __SCREAMING_SNAKE_CASE :Tuple = torch.full((batch_size,) ,SCREAMING_SNAKE_CASE__ ,device=self.unet.device ,dtype=torch.long ) for _ in range(SCREAMING_SNAKE_CASE__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __SCREAMING_SNAKE_CASE :str = self.value_function(x.permute(0 ,2 ,1 ) ,SCREAMING_SNAKE_CASE__ ).sample __SCREAMING_SNAKE_CASE :Union[str, Any] = torch.autograd.grad([y.sum()] ,[x] )[0] __SCREAMING_SNAKE_CASE :int = self.scheduler._get_variance(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = torch.exp(0.5 * posterior_variance ) __SCREAMING_SNAKE_CASE :List[str] = model_std * grad __SCREAMING_SNAKE_CASE :Dict = 0 __SCREAMING_SNAKE_CASE :List[Any] = x.detach() __SCREAMING_SNAKE_CASE :Union[str, Any] = x + scale * grad __SCREAMING_SNAKE_CASE :Any = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim ) __SCREAMING_SNAKE_CASE :Optional[int] = self.unet(x.permute(0 ,2 ,1 ) ,SCREAMING_SNAKE_CASE__ ).sample.permute(0 ,2 ,1 ) # TODO: verify deprecation of this kwarg __SCREAMING_SNAKE_CASE :Optional[int] = self.scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ 
,SCREAMING_SNAKE_CASE__ ,predict_epsilon=SCREAMING_SNAKE_CASE__ )['''prev_sample'''] # apply conditions to the trajectory (set the initial state) __SCREAMING_SNAKE_CASE :List[str] = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim ) __SCREAMING_SNAKE_CASE :Dict = self.to_torch(SCREAMING_SNAKE_CASE__ ) return x, y def __call__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=64 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.1 ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = self.normalize(SCREAMING_SNAKE_CASE__ ,'''observations''' ) __SCREAMING_SNAKE_CASE :List[Any] = obs[None].repeat(SCREAMING_SNAKE_CASE__ ,axis=0 ) __SCREAMING_SNAKE_CASE :str = {0: self.to_torch(SCREAMING_SNAKE_CASE__ )} __SCREAMING_SNAKE_CASE :Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) __SCREAMING_SNAKE_CASE :Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE__ ,device=self.unet.device ) __SCREAMING_SNAKE_CASE :Tuple = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim ) __SCREAMING_SNAKE_CASE :Any = self.to_torch(SCREAMING_SNAKE_CASE__ ) # run the diffusion process __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = self.run_diffusion(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) # sort output trajectories by value __SCREAMING_SNAKE_CASE :Any = y.argsort(0 ,descending=SCREAMING_SNAKE_CASE__ ).squeeze() __SCREAMING_SNAKE_CASE :Any = x[sorted_idx] __SCREAMING_SNAKE_CASE :str = sorted_values[:, :, : self.action_dim] __SCREAMING_SNAKE_CASE :Union[str, Any] = actions.detach().cpu().numpy() __SCREAMING_SNAKE_CASE :Optional[int] = self.de_normalize(SCREAMING_SNAKE_CASE__ ,key='''actions''' ) # select the action with the highest value if y is not None: __SCREAMING_SNAKE_CASE :Optional[int] = 0 else: # if we didn't run value guiding, select a random action __SCREAMING_SNAKE_CASE :Any = np.random.randint(0 ,SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = denorm_actions[selected_index, 0] return denorm_actions
239
1
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] a_ = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __lowercase ( lowerCamelCase : List[Any] ): UpperCamelCase_ : List[str] = torch.load(lowerCamelCase , map_location='cpu' ) return sd def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str=rename_keys_prefix ): UpperCamelCase_ : Any = OrderedDict() UpperCamelCase_ : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue UpperCamelCase_ : str = key for name_pair in rename_keys_prefix: UpperCamelCase_ : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) UpperCamelCase_ : Optional[Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately UpperCamelCase_ : Dict = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str ): assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: UpperCamelCase_ : str = 'pretraining' if "vcr" in checkpoint_path: UpperCamelCase_ : List[Any] = {'visual_embedding_dim': 512} elif "vqa_advanced" in checkpoint_path: UpperCamelCase_ : List[str] = {'visual_embedding_dim': 2048} elif "vqa" in checkpoint_path: UpperCamelCase_ : List[Any] = {'visual_embedding_dim': 2048} elif "nlvr" in checkpoint_path: UpperCamelCase_ : str = {'visual_embedding_dim': 1024} else: raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." 
) else: if "vcr" in checkpoint_path: UpperCamelCase_ : Any = {'visual_embedding_dim': 512} UpperCamelCase_ : List[str] = 'multichoice' elif "vqa_advanced" in checkpoint_path: UpperCamelCase_ : List[Any] = {'visual_embedding_dim': 2048} UpperCamelCase_ : Tuple = 'vqa_advanced' elif "vqa" in checkpoint_path: UpperCamelCase_ : Dict = {'visual_embedding_dim': 2048, 'num_labels': 3129} UpperCamelCase_ : Any = 'vqa' elif "nlvr" in checkpoint_path: UpperCamelCase_ : Optional[int] = { 'visual_embedding_dim': 1024, 'num_labels': 2, } UpperCamelCase_ : Dict = 'nlvr' UpperCamelCase_ : Tuple = VisualBertConfig(**lowerCamelCase ) # Load State Dict UpperCamelCase_ : Dict = load_state_dict(lowerCamelCase ) UpperCamelCase_ : Optional[int] = get_new_dict(lowerCamelCase , lowerCamelCase ) if model_type == "pretraining": UpperCamelCase_ : Optional[Any] = VisualBertForPreTraining(lowerCamelCase ) elif model_type == "vqa": UpperCamelCase_ : List[str] = VisualBertForQuestionAnswering(lowerCamelCase ) elif model_type == "nlvr": UpperCamelCase_ : List[Any] = VisualBertForVisualReasoning(lowerCamelCase ) elif model_type == "multichoice": UpperCamelCase_ : List[str] = VisualBertForMultipleChoice(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) # Save Checkpoints Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) model.save_pretrained(lowerCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') a_ = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
175
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def __lowercase ( lowerCamelCase : Any ): UpperCamelCase_ : Union[str, Any] = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F"{test_file} instead." ) UpperCamelCase_ : str = components[-1] if not test_fn.endswith('py' ): raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) UpperCamelCase_ : Union[str, Any] = components[:-1] + [test_fn.replace('.py' , '' )] UpperCamelCase_ : List[Any] = '.'.join(lowerCamelCase ) return test_module_path def __lowercase ( lowerCamelCase : Optional[Any] ): UpperCamelCase_ : List[Any] = get_module_path(lowerCamelCase ) UpperCamelCase_ : Union[str, Any] = importlib.import_module(lowerCamelCase ) return test_module def __lowercase ( lowerCamelCase : List[str] ): UpperCamelCase_ : int = [] UpperCamelCase_ : Tuple = get_test_module(lowerCamelCase ) for attr in dir(lowerCamelCase ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(lowerCamelCase , lowerCamelCase ) ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : str ): UpperCamelCase_ : List[str] = [] UpperCamelCase_ : Union[str, Any] = get_test_module(lowerCamelCase ) for attr in dir(lowerCamelCase ): UpperCamelCase_ : Dict = getattr(lowerCamelCase , lowerCamelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). UpperCamelCase_ : Optional[int] = getattr(lowerCamelCase , 'all_model_classes' , [] ) if len(lowerCamelCase ) > 0: test_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Dict ): UpperCamelCase_ : int = get_test_classes(lowerCamelCase ) UpperCamelCase_ : List[Any] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Tuple ): UpperCamelCase_ : int = test_class() if hasattr(lowerCamelCase , 'setUp' ): test.setUp() UpperCamelCase_ : List[Any] = None if hasattr(lowerCamelCase , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: UpperCamelCase_ : Optional[Any] = test.model_tester.__class__ return model_tester def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Dict ): UpperCamelCase_ : Optional[Any] = get_test_classes(lowerCamelCase ) UpperCamelCase_ : Tuple = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Any , lowerCamelCase : Tuple ): UpperCamelCase_ : List[Any] = get_test_classes_for_model(lowerCamelCase , lowerCamelCase ) UpperCamelCase_ : int = [] for test_class in test_classes: UpperCamelCase_ : Tuple = get_model_tester_from_test_class(lowerCamelCase ) if tester_class is not None: tester_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : str ): UpperCamelCase_ : Tuple = get_test_classes(lowerCamelCase ) UpperCamelCase_ : Tuple = {test_class: get_model_tester_from_test_class(lowerCamelCase ) for test_class in test_classes} return test_tester_mapping def __lowercase ( lowerCamelCase : Any ): UpperCamelCase_ : List[str] = get_model_classes(lowerCamelCase ) UpperCamelCase_ : int = { model_class: get_test_classes_for_model(lowerCamelCase , lowerCamelCase ) for model_class in model_classes } return model_test_mapping def __lowercase ( lowerCamelCase : Tuple ): UpperCamelCase_ : Tuple = get_model_classes(lowerCamelCase ) UpperCamelCase_ : Optional[Any] = { model_class: get_tester_classes_for_model(lowerCamelCase , lowerCamelCase ) for model_class in model_classes } return model_to_tester_mapping def __lowercase ( lowerCamelCase : Any ): if isinstance(lowerCamelCase , lowerCamelCase ): return o elif isinstance(lowerCamelCase , lowerCamelCase ): return o.__name__ elif isinstance(lowerCamelCase , (list, tuple) ): return [to_json(lowerCamelCase ) for x in o] elif isinstance(lowerCamelCase , lowerCamelCase ): return {to_json(lowerCamelCase ): to_json(lowerCamelCase ) for k, v in o.items()} else: return o
175
1
"""simple docstring""" import functools from typing import Any def _A ( UpperCamelCase_ : str, UpperCamelCase_ : list[str]) -> bool: '''simple docstring''' if not isinstance(UpperCamelCase_, UpperCamelCase_) or len(UpperCamelCase_) == 0: raise ValueError("the string should be not empty string") if not isinstance(UpperCamelCase_, UpperCamelCase_) or not all( isinstance(UpperCamelCase_, UpperCamelCase_) and len(UpperCamelCase_) > 0 for item in words): raise ValueError("the words should be a list of non-empty strings") # Build trie __lowercase = {} __lowercase = "WORD_KEEPER" for word in words: __lowercase = trie for c in word: if c not in trie_node: __lowercase = {} __lowercase = trie_node[c] __lowercase = True __lowercase = len(UpperCamelCase_) # Dynamic programming method @functools.cache def is_breakable(UpperCamelCase_ : int) -> bool: if index == len_string: return True __lowercase = trie for i in range(UpperCamelCase_, UpperCamelCase_): __lowercase = trie_node.get(string[i], UpperCamelCase_) if trie_node is None: return False if trie_node.get(UpperCamelCase_, UpperCamelCase_) and is_breakable(i + 1): return True return False return is_breakable(0) if __name__ == "__main__": import doctest doctest.testmod()
144
"""simple docstring""" import re from filelock import FileLock try: import nltk _a = True except (ImportError, ModuleNotFoundError): _a = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _A ( UpperCamelCase_ : str) -> str: '''simple docstring''' re.sub("<n>", "", UpperCamelCase_) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(UpperCamelCase_))
144
1
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker A__ : Dict = '''CompVis/stable-diffusion-v1-1''' A__ : int = '''CompVis/stable-diffusion-v1-2''' A__ : List[Any] = '''CompVis/stable-diffusion-v1-3''' A__ : List[Any] = '''CompVis/stable-diffusion-v1-4''' class __snake_case ( UpperCamelCase_ ): def __init__( self : List[str] , A_ : AutoencoderKL , A_ : CLIPTextModel , A_ : CLIPTokenizer , A_ : UNetaDConditionModel , A_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A_ : StableDiffusionSafetyChecker , A_ : CLIPImageProcessor , A_ : bool = True , ): super()._init_() lowerCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(A_) lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(A_) lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(A_) lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline( vae=A_ , text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , requires_safety_checker=A_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea) @property def UpperCAmelCase__ ( self : int): return {k: getattr(self , A_) for k in self.config.keys() if not k.startswith('''_''')} def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Union[str, int]] = "auto"): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase_ : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A_) def UpperCAmelCase__ ( self : Optional[int]): self.enable_attention_slicing(A_) @torch.no_grad() def UpperCAmelCase__ ( self : List[str] , A_ : Union[str, List[str]] , A_ : int = 5_1_2 , A_ : int = 5_1_2 , A_ : int = 5_0 , A_ : float = 7.5 , A_ : Optional[Union[str, List[str]]] = None , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[torch.Generator] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , **A_ : str , ): return self.pipea( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Dict , A_ : Union[str, List[str]] , A_ : int = 5_1_2 , A_ : int = 5_1_2 , A_ : int = 5_0 , A_ : float = 7.5 , A_ : Optional[Union[str, List[str]]] = None , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[torch.Generator] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , **A_ : Union[str, Any] , ): return self.pipea( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , 
**A_ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[str] , A_ : Union[str, List[str]] , A_ : int = 5_1_2 , A_ : int = 5_1_2 , A_ : int = 5_0 , A_ : float = 7.5 , A_ : Optional[Union[str, List[str]]] = None , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[torch.Generator] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , **A_ : Tuple , ): return self.pipea( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Union[str, List[str]] , A_ : int = 5_1_2 , A_ : int = 5_1_2 , A_ : int = 5_0 , A_ : float = 7.5 , A_ : Optional[Union[str, List[str]]] = None , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[torch.Generator] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , **A_ : int , ): return self.pipea( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , A_ : Union[str, List[str]] , A_ : int = 5_1_2 , A_ : int = 5_1_2 , A_ : int = 5_0 , A_ : float = 7.5 , A_ : Optional[Union[str, List[str]]] = None , A_ : Optional[int] = 1 , A_ : float = 0.0 , A_ : Optional[torch.Generator] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , **A_ : List[str] , ): lowerCAmelCase_ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(A_) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""") # Get first result from Stable Diffusion Checkpoint v1.1 lowerCAmelCase_ : Optional[Any] = self.textaimg_sda_a( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCAmelCase_ : str = self.textaimg_sda_a( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 lowerCAmelCase_ : List[str] = self.textaimg_sda_a( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCAmelCase_ : List[Any] = self.textaimg_sda_a( prompt=A_ , height=A_ , width=A_ , num_inference_steps=A_ , guidance_scale=A_ , negative_prompt=A_ , 
num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , output_type=A_ , return_dict=A_ , callback=A_ , callback_steps=A_ , **A_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
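For reference, a minimal usage sketch of the comparison pipeline defined above. The public entry-point name was mangled in this snippet, so loading it as the "stable_diffusion_comparison" community pipeline and calling the object directly are both assumptions.

import torch
from diffusers import DiffusionPipeline

# Assumption: the class above ships as the "stable_diffusion_comparison"
# community pipeline; calling the pipeline object runs all four checkpoints.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")
pipe.enable_attention_slicing()

output = pipe(prompt="an astronaut riding a horse on mars", num_inference_steps=25)
# output.images holds one image per checkpoint, v1.1 through v1.4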
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = """yolos""" def __init__( self , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=[5_1_2, 8_6_4] , __lowerCAmelCase=1_6 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=1_0_0 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , **__lowerCAmelCase , ): '''simple docstring''' super().__init__(**__lowerCAmelCase ) lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = qkv_bias lowerCamelCase__ = num_detection_tokens lowerCamelCase__ = use_mid_position_embeddings lowerCamelCase__ = auxiliary_loss # Hungarian matcher lowerCamelCase__ = class_cost lowerCamelCase__ = bbox_cost lowerCamelCase__ = giou_cost # Loss coefficients lowerCamelCase__ = bbox_loss_coefficient lowerCamelCase__ = giou_loss_coefficient lowerCamelCase__ = eos_coefficient class __A ( lowerCAmelCase ): '''simple docstring''' lowerCAmelCase_ = version.parse("""1.11""" ) @property def __lowerCamelCase ( self ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): '''simple docstring''' return 1E-4 @property def __lowerCamelCase ( self ): '''simple docstring''' return 1_2
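For reference, a short sketch of the configuration classes above; the defaults printed below come straight from the constructor signature, and the ONNX import path is an assumption since the subclass name was mangled.

from transformers import YolosConfig
from transformers.models.yolos.configuration_yolos import YolosOnnxConfig  # assumed import path

config = YolosConfig()
print(config.hidden_size)           # 768
print(config.image_size)            # [512, 864]
print(config.num_detection_tokens)  # 100

onnx_config = YolosOnnxConfig(config)
print(onnx_config.inputs)               # pixel_values: batch x num_channels x height x width
print(onnx_config.atol_for_validation)  # 1e-4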
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : str = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) snake_case : Dict = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) model.to(UpperCamelCase__ ) from datasets import load_dataset snake_case : Union[str, Any] = load_dataset("nielsr/rvlcdip-demo" ) snake_case : Any = dataset["train"][0]["image"].convert("RGB" ) snake_case : Union[str, Any] = image_processor(UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): snake_case : int = model(**UpperCamelCase__ ) snake_case : int = outputs.logits snake_case : int = torch.Size((1, 16) ) self.assertEqual(logits.shape , UpperCamelCase__ ) snake_case : int = torch.tensor( [-0.4158, -0.4092, -0.4347] , device=UpperCamelCase__ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __snake_case = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]: """simple docstring""" snake_case : Optional[Any] = list(s_dict.keys() ) for key in keys: snake_case : Any = R".*/layers_(\d+)" snake_case : Tuple = key if re.match(lowercase , lowercase ): snake_case : List[str] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowercase ) snake_case : Union[str, Any] = R"(encoder|decoder)\/" if re.match(lowercase , lowercase ): snake_case : Any = re.match(lowercase , lowercase ).groups() if groups[0] == "encoder": snake_case : Union[str, Any] = re.sub(R"/mlp/" , R"/1/mlp/" , lowercase ) snake_case : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowercase ) elif groups[0] == "decoder": snake_case : str = re.sub(R"/mlp/" , R"/2/mlp/" , lowercase ) snake_case : List[str] = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowercase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: snake_case : int = new_key.replace(lowercase , lowercase ) print(F'{key} -> {new_key}' ) snake_case : Optional[Any] = s_dict.pop(lowercase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case : int = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case : Optional[Any] = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: snake_case : Tuple = s_dict[key].shape[0] snake_case : int = s_dict[key] for idx in range(lowercase ): snake_case : List[str] = expert_weihts[idx] print(F'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(lowercase ) return s_dict __snake_case = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def __lowerCAmelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> int: """simple docstring""" import regex as re with open(lowercase , "r" ) as f: snake_case : List[str] = f.read() snake_case : Tuple = re.findall(R"(.*) = ([0-9.]*)" , lowercase ) snake_case : Any = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": snake_case : Tuple = float(lowercase ) if "." in value else int(lowercase ) snake_case : List[str] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowercase )[0] snake_case : List[Any] = str(activation[1] ) snake_case : Optional[Any] = num_experts snake_case : List[Any] = SwitchTransformersConfig(**lowercase ) return config def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any="./" , lowercase : int=8 ) -> Dict: """simple docstring""" print(F'Loading flax weights from : {flax_checkpoint_path}' ) snake_case : Union[str, Any] = checkpoints.load_tax_checkpoint(lowercase ) if gin_file is not None: snake_case : List[str] = convert_gin_to_config(lowercase , lowercase ) else: snake_case : str = SwitchTransformersConfig.from_pretrained(lowercase ) snake_case : Any = SwitchTransformersForConditionalGeneration(lowercase ) snake_case : Optional[Any] = flax_params["target"] snake_case : Optional[int] = flatten_dict(lowercase , sep="/" ) snake_case : Optional[Any] = rename_keys(lowercase ) snake_case : List[str] = unflatten_dict(lowercase , sep="/" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(lowercase , lowercase ) print(F'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(lowercase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. 
If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") __snake_case = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
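The converter can also be driven without the CLI, e.g. from a notebook; the positional order matches the __main__ call above and the paths are placeholders.

convert_flax_checkpoint_to_pytorch(
    "/path/to/t5x/switch_base_8/checkpoint",  # T5X checkpoint (placeholder)
    None,                                     # config name; not needed when a gin file is given
    "/path/to/model.gin",                     # gin config (placeholder)
    "./switch-base-8-pt",                     # output directory
    8,                                        # number of experts
)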
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase : List[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class A__ ( A__ ): A__ = ['pixel_values'] def __init__( self : int , _a : bool = True , _a : Dict[str, int] = None , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : bool = True , _a : Dict[str, int] = None , _a : bool = True , _a : Union[int, float] = 1 / 255 , _a : bool = True , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , _a : bool = True , **_a : Any , ) -> None: '''simple docstring''' super().__init__(**_a ) _SCREAMING_SNAKE_CASE =size if size is not None else {'shortest_edge': 224} _SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a ) _SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'height': 224, 'width': 224} _SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a , param_name='crop_size' ) _SCREAMING_SNAKE_CASE =do_resize _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =resample _SCREAMING_SNAKE_CASE =do_center_crop _SCREAMING_SNAKE_CASE =crop_size _SCREAMING_SNAKE_CASE =do_rescale _SCREAMING_SNAKE_CASE =rescale_factor _SCREAMING_SNAKE_CASE =do_normalize _SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else OPENAI_CLIP_MEAN _SCREAMING_SNAKE_CASE =image_std if image_std is not None else OPENAI_CLIP_STD _SCREAMING_SNAKE_CASE =do_convert_rgb def A ( self : int , _a : np.ndarray , _a : Dict[str, int] , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : Optional[Union[str, ChannelDimension]] = None , **_a : str , ) -> np.ndarray: '''simple docstring''' _SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) _SCREAMING_SNAKE_CASE =get_resize_output_image_size(_a , size=size['shortest_edge'] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def A ( self : Optional[Any] , _a : np.ndarray , _a : Dict[str, int] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _SCREAMING_SNAKE_CASE =get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(_a , size=(size['height'], size['width']) , data_format=_a , **_a ) def A ( self : List[Any] , _a : np.ndarray , _a : Union[int, float] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : str , ) -> str: '''simple docstring''' return rescale(_a , scale=_a , data_format=_a , **_a ) def A ( self : List[Any] , _a : np.ndarray , _a : Union[float, List[float]] , _a : Union[float, List[float]] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Optional[Any] , ) -> np.ndarray: '''simple docstring''' return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def A ( self : Tuple , _a : ImageInput , _a : bool = None , _a : Dict[str, int] = None , _a : PILImageResampling = None , _a : bool = None , _a : int = None , _a : bool = None , _a : float = None , _a : bool = None , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , _a : bool = None , _a : Optional[Union[str, TensorType]] = None , _a : Optional[ChannelDimension] = ChannelDimension.FIRST , **_a : Optional[Any] , ) -> PIL.Image.Image: '''simple docstring''' _SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize _SCREAMING_SNAKE_CASE =size if size is not None else self.size _SCREAMING_SNAKE_CASE =get_size_dict(_a , param_name='size' , default_to_square=_a ) _SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample _SCREAMING_SNAKE_CASE =do_center_crop if do_center_crop is not None else self.do_center_crop _SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else self.crop_size _SCREAMING_SNAKE_CASE =get_size_dict(_a , param_name='crop_size' , default_to_square=_a ) _SCREAMING_SNAKE_CASE =do_rescale if do_rescale is not None else self.do_rescale _SCREAMING_SNAKE_CASE =rescale_factor if rescale_factor is not None else self.rescale_factor _SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize _SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else self.image_mean _SCREAMING_SNAKE_CASE =image_std if image_std is not None else self.image_std _SCREAMING_SNAKE_CASE =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _SCREAMING_SNAKE_CASE =make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _SCREAMING_SNAKE_CASE =[convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. 
_SCREAMING_SNAKE_CASE =[to_numpy_array(_a ) for image in images] if do_resize: _SCREAMING_SNAKE_CASE =[self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: _SCREAMING_SNAKE_CASE =[self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: _SCREAMING_SNAKE_CASE =[self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _SCREAMING_SNAKE_CASE =[self.normalize(image=_a , mean=_a , std=_a ) for image in images] _SCREAMING_SNAKE_CASE =[to_channel_dimension_format(_a , _a ) for image in images] _SCREAMING_SNAKE_CASE ={'pixel_values': images} return BatchFeature(data=_a , tensor_type=_a )
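The class above is a CLIP-style image preprocessor (its name was mangled; transformers exposes this interface as CLIPImageProcessor, which is assumed below). A minimal round-trip, assuming Pillow is installed:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor  # assumed public name of the class above

processor = CLIPImageProcessor()  # defaults: shortest_edge=224 resize + 224x224 center crop
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)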
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device

# NOTE: a module-level `= False` assignment appeared here whose target name was
# lost during extraction; it is preserved under a placeholder name.
_MODULE_FLAG = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    # NOTE: class/method names were mangled in the source; restored descriptively.
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowerCamelCase (a_ :Optional[Any]) -> Union[str, Any]: lowercase :str = int(a_) lowercase , lowercase , lowercase :Tuple = t // 3600, (t // 60) % 60, t % 60 return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}""" def lowerCamelCase (a_ :Optional[Any] , a_ :str , a_ :Optional[int] , a_ :str , a_ :Any=300) -> Optional[int]: # docstyle-ignore return F""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def lowerCamelCase (a_ :Tuple) -> Optional[Any]: lowercase :Any = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowercase :List[Any] = F"""{elt:.6f}""" if isinstance(a_ , a_) else str(a_) html_code += F""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __magic_name__ : __A : Optional[Any] = 5 __A : int = 0.2 def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[str] = None , snake_case__ : bool = True , snake_case__ : Optional["NotebookTrainingTracker"] = None , snake_case__ : int = 3_0_0 , ): '''simple docstring''' lowercase :Tuple = total lowercase :List[Any] = '''''' if prefix is None else prefix lowercase :Optional[Any] = leave lowercase :List[str] = parent lowercase :Tuple = width lowercase :Optional[Any] = None lowercase :int = None lowercase :Optional[Any] = None def __snake_case ( self : Optional[int] , snake_case__ : int , snake_case__ : bool = False , snake_case__ : str = None ): '''simple docstring''' lowercase :Optional[Any] = value if comment is not None: lowercase :Optional[int] = comment if self.last_value is None: lowercase :Dict = time.time() lowercase :Dict = value lowercase :Optional[Any] = None lowercase :List[Any] = self.warmup lowercase :str = 1 self.update_bar(snake_case__ ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 lowercase :Optional[int] = time.time() lowercase :Optional[int] = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: lowercase :Dict = self.elapsed_time / (value - self.start_value) else: lowercase :Optional[Any] = None if value >= self.total: lowercase :str = self.total lowercase :Union[str, Any] = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowercase :int = self.average_time_per_item * (self.total - value) self.update_bar(snake_case__ ) lowercase :Tuple = value lowercase :List[str] = current_time if self.average_time_per_item is None: lowercase :Tuple = 1 else: lowercase :Any = max(int(self.update_every / self.average_time_per_item ) , 1 ) def __snake_case ( self : List[Any] , snake_case__ : int , snake_case__ : int=None ): '''simple docstring''' lowercase :str = ''' ''' * (len(str(self.total ) ) - len(str(snake_case__ ) )) + str(snake_case__ ) if self.elapsed_time is None: lowercase :Tuple = f"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: lowercase :Optional[Any] = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: lowercase :int = ( f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" f""" {format_time(self.predicted_remaining )}""" ) self.label += f""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]""" self.display() def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowercase :List[str] = disp.display(disp.HTML(self.html_code ) , display_id=snake_case__ ) else: self.output.update(disp.HTML(self.html_code ) ) def __snake_case ( self : Tuple ): '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Any , snake_case__ : str , snake_case__ : Optional[Any]=None ): '''simple docstring''' super().__init__(snake_case__ ) lowercase :int = None if column_names is None else [column_names] lowercase :List[str] = None def __snake_case ( self : Union[str, Any] ): '''simple docstring''' lowercase :Optional[int] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowercase :List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=snake_case__ ) else: self.output.update(disp.HTML(self.html_code ) ) def __snake_case ( self : Any , snake_case__ : int ): '''simple docstring''' if self.inner_table is None: lowercase :List[Any] = [list(values.keys() ), list(values.values() )] else: lowercase :Optional[Any] = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(snake_case__ ) lowercase :List[Any] = columns self.inner_table.append([values[c] for c in columns] ) def __snake_case ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=3_0_0 ): '''simple docstring''' lowercase :Union[str, Any] = NotebookProgressBar(snake_case__ , prefix=snake_case__ , parent=self , width=snake_case__ ) return 
self.child_bar def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :Dict = None self.display() class __magic_name__ ( __UpperCAmelCase ): def __init__( self : List[str] ): '''simple docstring''' lowercase :List[str] = None lowercase :Union[str, Any] = None lowercase :List[str] = False def __snake_case ( self : Dict , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , **snake_case__ : int ): '''simple docstring''' lowercase :Tuple = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' lowercase :Any = 0 lowercase :int = 0 lowercase :List[str] = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) lowercase :str = NotebookTrainingTracker(state.max_steps , snake_case__ ) def __snake_case ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : int , **snake_case__ : Dict ): '''simple docstring''' lowercase :Dict = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) lowercase :int = False def __snake_case ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=None , **snake_case__ : int ): '''simple docstring''' if not has_length(snake_case__ ): return if self.prediction_bar is None: if self.training_tracker is not None: lowercase :Tuple = self.training_tracker.add_child(len(snake_case__ ) ) else: lowercase :Optional[int] = NotebookProgressBar(len(snake_case__ ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __snake_case ( self : Tuple , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , **snake_case__ : str ): '''simple docstring''' if self.prediction_bar is not None: self.prediction_bar.close() lowercase :Any = None def __snake_case ( self : Dict , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Any=None , **snake_case__ : str ): '''simple docstring''' if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowercase :Optional[int] = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy lowercase :Dict = state.global_step self.training_tracker.write_line(snake_case__ ) def __snake_case ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[Any]=None , **snake_case__ : Optional[Any] ): '''simple docstring''' if self.training_tracker is not None: lowercase :int = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: lowercase :Tuple = log['''loss'''] break if self.first_column == "Epoch": lowercase :Union[str, Any] = int(state.epoch ) else: lowercase :Any = state.global_step lowercase :List[str] = '''eval''' for k in metrics: if k.endswith('''_loss''' ): lowercase :Tuple = re.sub(r'''\_loss$''' , '''''' , snake_case__ ) lowercase :Optional[Any] = metrics.pop('''total_flos''' , snake_case__ ) lowercase :Union[str, Any] = metrics.pop('''epoch''' , snake_case__ ) lowercase :Any = metrics.pop(f"""{metric_key_prefix}_runtime""" , snake_case__ ) lowercase :Any = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , snake_case__ ) 
lowercase :List[Any] = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , snake_case__ ) lowercase :List[str] = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , snake_case__ ) for k, v in metrics.items(): if k == f"""{metric_key_prefix}_loss""": lowercase :Optional[Any] = v else: lowercase :Optional[Any] = k.split('''_''' ) lowercase :List[str] = ''' '''.join([part.capitalize() for part in splits[1:]] ) lowercase :Union[str, Any] = v self.training_tracker.write_line(snake_case__ ) self.training_tracker.remove_child() lowercase :Any = None # Evaluation takes a long time so we should force the next update. lowercase :Any = True def __snake_case ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , **snake_case__ : Any ): '''simple docstring''' self.training_tracker.update( state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=snake_case__ ) lowercase :Optional[int] = None
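The progress bar can be exercised directly in a notebook without a Trainer; a minimal sketch, assuming the classes live at their usual transformers location:

import time

from transformers.utils.notebook import NotebookProgressBar  # assumed import path

bar = NotebookProgressBar(100, prefix="Demo")
for step in range(100):
    time.sleep(0.01)
    bar.update(step + 1, comment=f"step {step + 1}")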
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) def lowerCamelCase (a_ :str) -> YolosConfig: lowercase :Union[str, Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowercase :List[str] = 192 lowercase :List[str] = 768 lowercase :int = 12 lowercase :str = 3 lowercase :List[Any] = [800, 1333] lowercase :Any = False elif yolos_name == "yolos_s_dWr": lowercase :List[str] = 330 lowercase :List[Any] = 14 lowercase :int = 6 lowercase :List[Any] = 1320 elif "yolos_s" in yolos_name: lowercase :int = 384 lowercase :Union[str, Any] = 1536 lowercase :int = 12 lowercase :str = 6 elif "yolos_b" in yolos_name: lowercase :Dict = [800, 1344] lowercase :List[str] = 91 lowercase :List[Any] = '''huggingface/label-files''' lowercase :Union[str, Any] = '''coco-detection-id2label.json''' lowercase :int = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r''')) lowercase :List[Any] = {int(a_): v for k, v in idalabel.items()} lowercase :Dict = idalabel lowercase :Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase (a_ :dict , a_ :YolosConfig , a_ :bool = False) -> Optional[int]: for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase :Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""") lowercase :List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""") # next, add query, keys and values (in that order) to the state dict lowercase :int = in_proj_weight[: config.hidden_size, :] lowercase :List[str] = in_proj_bias[: config.hidden_size] lowercase :Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase :int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase :Any = in_proj_weight[-config.hidden_size :, :] lowercase :Union[str, Any] = in_proj_bias[-config.hidden_size :] def lowerCamelCase (a_ :str) -> str: if "backbone" in name: lowercase :Optional[int] = name.replace('''backbone''' , '''vit''') if "cls_token" in name: lowercase :List[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''') if "det_token" in name: lowercase :int = name.replace('''det_token''' , '''embeddings.detection_tokens''') if "mid_pos_embed" in name: lowercase :List[Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''') if "pos_embed" in name: lowercase :List[str] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''') if "patch_embed.proj" in name: lowercase :Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''') if "blocks" in name: lowercase :Any = name.replace('''blocks''' , '''encoder.layer''') if "attn.proj" in name: lowercase :Dict = name.replace('''attn.proj''' , '''attention.output.dense''') if "attn" in name: lowercase :Tuple = name.replace('''attn''' , '''attention.self''') if "norm1" in name: lowercase :List[Any] = name.replace('''norm1''' , '''layernorm_before''') if "norm2" in name: lowercase :List[Any] = name.replace('''norm2''' , '''layernorm_after''') if "mlp.fc1" in name: lowercase :Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''') if "mlp.fc2" in name: lowercase :Dict = 
name.replace('''mlp.fc2''' , '''output.dense''') if "class_embed" in name: lowercase :Dict = name.replace('''class_embed''' , '''class_labels_classifier''') if "bbox_embed" in name: lowercase :Dict = name.replace('''bbox_embed''' , '''bbox_predictor''') if "vit.norm" in name: lowercase :Dict = name.replace('''vit.norm''' , '''vit.layernorm''') return name def lowerCamelCase (a_ :dict , a_ :YolosForObjectDetection) -> dict: for key in orig_state_dict.copy().keys(): lowercase :List[Any] = orig_state_dict.pop(a_) if "qkv" in key: lowercase :str = key.split('''.''') lowercase :List[str] = int(key_split[2]) lowercase :List[str] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: lowercase :List[Any] = val[:dim, :] lowercase :Optional[int] = val[ dim : dim * 2, : ] lowercase :Any = val[-dim:, :] else: lowercase :List[str] = val[:dim] lowercase :Union[str, Any] = val[dim : dim * 2] lowercase :List[Any] = val[-dim:] else: lowercase :List[str] = val return orig_state_dict def lowerCamelCase () -> torch.Tensor: lowercase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase :Dict = Image.open(requests.get(a_ , stream=a_).raw) return im @torch.no_grad() def lowerCamelCase (a_ :str , a_ :str , a_ :str , a_ :bool = False) -> List[Any]: lowercase :Union[str, Any] = get_yolos_config(a_) # load original state_dict lowercase :List[str] = torch.load(a_ , map_location='''cpu''')['''model'''] # load 🤗 model lowercase :Tuple = YolosForObjectDetection(a_) model.eval() lowercase :Dict = convert_state_dict(a_ , a_) model.load_state_dict(a_) # Check outputs on an image, prepared by YolosImageProcessor lowercase :Tuple = 800 if yolos_name != '''yolos_ti''' else 512 lowercase :Dict = YolosImageProcessor(format='''coco_detection''' , size=a_) lowercase :Optional[int] = image_processor(images=prepare_img() , return_tensors='''pt''') lowercase :List[Any] = model(**a_) lowercase , lowercase :Dict = outputs.logits, outputs.pred_boxes lowercase , lowercase :int = None, None if yolos_name == "yolos_ti": lowercase :Dict = torch.tensor( [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]]) lowercase :Dict = torch.tensor( [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]]) elif yolos_name == "yolos_s_200_pre": lowercase :Union[str, Any] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]]) lowercase :List[str] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]]) elif yolos_name == "yolos_s_300_pre": lowercase :int = torch.tensor( [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]]) lowercase :Optional[Any] = torch.tensor( [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]]) elif yolos_name == "yolos_s_dWr": lowercase :int = torch.tensor( [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]]) lowercase :Dict = torch.tensor( [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]]) elif yolos_name == "yolos_base": lowercase :Dict = torch.tensor( [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]]) lowercase :Tuple = torch.tensor( [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]]) else: raise 
ValueError(F"""Unknown yolos_name: {yolos_name}""") assert torch.allclose(logits[0, :3, :3] , a_ , atol=1E-4) assert torch.allclose(pred_boxes[0, :3, :3] , a_ , atol=1E-4) Path(a_).mkdir(exist_ok=a_) print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""") model.save_pretrained(a_) print(F"""Saving image processor to {pytorch_dump_folder_path}""") image_processor.save_pretrained(a_) if push_to_hub: lowercase :Optional[int] = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''') lowercase :Optional[Any] = model_mapping[yolos_name] image_processor.push_to_hub(a_ , organization='''hustvl''') model.push_to_hub(a_ , organization='''hustvl''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) UpperCAmelCase = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCamelCase_ : def __init__( self : Dict , _A : List[str] , _A : Dict=100 , _A : Tuple=13 , _A : str=30 , _A : Optional[Any]=2 , _A : List[str]=3 , _A : str=True , _A : Tuple=True , _A : Any=32 , _A : Dict=4 , _A : Union[str, Any]=4 , _A : Dict=37 , _A : int="gelu" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : Union[str, Any]=10 , _A : int=0.0_2 , _A : Dict=3 , _A : Optional[Any]=None , _A : Dict=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : List[str] = 100 UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : str = patch_size UpperCAmelCase__ : Optional[int] = num_channels UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[Any] = scope UpperCAmelCase__ : int = out_indices UpperCAmelCase__ : Any = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : int = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self : str ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def lowercase_ ( self : Tuple , _A : Any , _A : Dict , _A : Tuple , _A : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self : str , _A : Optional[int] , _A : str , _A : Optional[Any] , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = BeitForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowercase_ ( self : List[str] , _A : Tuple , _A : Any , _A : Tuple , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : str = self.type_sequence_label_size UpperCAmelCase__ : Any = BeitForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : str = 1 UpperCAmelCase__ : Dict = BeitForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : Any = BeitForSemanticSegmentation(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Optional[Any] = model(_A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(_A , labels=_A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs UpperCAmelCase__ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def lowercase_ ( self : List[Any] ): '''simple 
docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowercase_ ( self : List[Any] ): '''simple docstring''' pass def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : List[Any] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Any = model_class(_A ) UpperCAmelCase__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Any = [*signature.parameters.keys()] UpperCAmelCase__ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_A ) def lowercase_ ( self : Dict ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : List[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(_A ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : List[Any] = model_class(_A ) model.to(_A ) model.train() UpperCAmelCase__ : List[str] = self._prepare_for_class(_A , _A , return_labels=_A ) UpperCAmelCase__ : int = model(**_A ).loss loss.backward() def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(_A ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : Tuple = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() UpperCAmelCase__ : str = self._prepare_for_class(_A , _A , return_labels=_A ) UpperCAmelCase__ : str = model(**_A ).loss loss.backward() def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Any = _config_zero_init(_A ) for 
model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(config=_A ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @slow def lowercase_ ( self : List[str] ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Dict = BeitModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> int: UpperCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(_A ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=_A , return_tensors='''pt''' ).pixel_values.to(_A ) # prepare bool_masked_pos UpperCAmelCase__ : Optional[int] = torch.ones((1, 196) , dtype=torch.bool ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(pixel_values=_A , bool_masked_pos=_A ) UpperCAmelCase__ : Optional[Any] = outputs.logits # verify the logits UpperCAmelCase__ : List[Any] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , _A ) UpperCAmelCase__ : int = torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(_A ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _A , atol=1e-2 ) ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(_A ) UpperCAmelCase__ : str = self.default_image_processor UpperCAmelCase__ : Tuple = prepare_img() UpperCAmelCase__ : List[str] = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**_A ) UpperCAmelCase__ : List[str] = outputs.logits # verify the logits UpperCAmelCase__ : Tuple = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , _A ) UpperCAmelCase__ : Any = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_A ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4 ) ) UpperCAmelCase__ : Any = 281 self.assertEqual(logits.argmax(-1 ).item() , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( _A ) UpperCAmelCase__ : Optional[Any] = self.default_image_processor UpperCAmelCase__ : Tuple = prepare_img() UpperCAmelCase__ : Optional[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**_A ) UpperCAmelCase__ : Any = outputs.logits # verify the logits 
UpperCAmelCase__ : int = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , _A ) UpperCAmelCase__ : Tuple = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_A ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_396 self.assertEqual(logits.argmax(-1 ).item() , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) UpperCAmelCase__ : int = model.to(_A ) UpperCAmelCase__ : List[str] = BeitImageProcessor(do_resize=_A , size=640 , do_center_crop=_A ) UpperCAmelCase__ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]['''file'''] ) UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[Any] = model(**_A ) UpperCAmelCase__ : Union[str, Any] = outputs.logits # verify the logits UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , _A ) UpperCAmelCase__ : Tuple = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: UpperCAmelCase__ : List[Any] = torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=_A , ) else: UpperCAmelCase__ : List[Any] = torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=_A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 ) ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) UpperCAmelCase__ : Optional[Any] = model.to(_A ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=_A , size=640 , do_center_crop=_A ) UpperCAmelCase__ : List[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase__ : str = Image.open(ds[0]['''file'''] ) UpperCAmelCase__ : str = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Any = model(**_A ) UpperCAmelCase__ : Any = outputs.logits.detach().cpu() UpperCAmelCase__ : int = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(500, 300)] ) UpperCAmelCase__ : Union[str, Any] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , _A ) UpperCAmelCase__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_A ) UpperCAmelCase__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , _A )
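Outside the test harness, the BEiT classification check above reduces to a few lines; the model id and fixture path both come from the tests.

import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])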
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    # NOTE: both function names were mangled in the source; random_quotes is
    # recovered from the __main__ block below, quote_of_the_day is a restored guess.
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    # NOTE: parameter and local names were mangled in the source; they are
    # restored here from the __main__ block below, which calls calc_profit.
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this ratio as consumed

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
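A quick worked example: with capacity 15, the two ratio-2.0 items (profits 10 and 8, weights 5 and 4) are taken whole, and the remaining 6 kg exactly fits the ratio-1.5 item, so the gain is 10 + 8 + 9 = 27.

print(calc_profit([10, 9, 8], [5, 6, 4], 15))  # 27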
359
from datetime import datetime import matplotlib.pyplot as plt import torch def freeze_module(module) -> None: for param in module.parameters(): param.requires_grad = False def get_device() -> str: device = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): device = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def show_pil(img) -> None: fig = plt.imshow(img) fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) plt.show() def get_timestamp() -> str: current_time = datetime.now() timestamp = current_time.strftime("%H:%M:%S") return timestamp
328
0
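An aside on the fractional-knapsack record above: the repeated index() scan over profit_by_weight can be replaced by sorting index pairs by profit density once, which is both simpler and faster. A sketch of that equivalent greedy, not part of the dataset:

def fractional_knapsack(profit: list[int], weight: list[int], max_weight: int) -> float:
    # Sort item indices by profit density, best first.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    remaining, gain = max_weight, 0.0
    for i in order:
        if weight[i] <= remaining:
            remaining -= weight[i]
            gain += profit[i]  # take the whole item
        else:
            gain += profit[i] * remaining / weight[i]  # take a fraction and stop
            break
    return gain

assert fractional_knapsack([10, 9, 8], [3, 4, 5], 25) == 27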
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowercase ( A__ ): """simple docstring""" _a = ["image_processor", "tokenizer"] _a = "OwlViTImageProcessor" _a = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ :Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , UpperCAmelCase__ , ) UpperCamelCase__ :List[Any] = kwargs.pop('''feature_extractor''' ) UpperCamelCase__ :List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(UpperCAmelCase__ , UpperCAmelCase__ ) def __call__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="max_length" , UpperCamelCase_="np" , **UpperCamelCase_ ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(text[0] , UpperCAmelCase__ )): UpperCamelCase__ :Optional[Any] = [self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )] elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(text[0] , UpperCAmelCase__ ): UpperCamelCase__ :Any = [] # Maximum number of queries across batch UpperCamelCase__ :List[str] = max([len(UpperCAmelCase__ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase__ ) != max_num_queries: UpperCamelCase__ :Dict = t + [''' '''] * (max_num_queries - len(UpperCAmelCase__ )) UpperCamelCase__ :Optional[Any] = self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) encodings.append(UpperCAmelCase__ ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": UpperCamelCase__ :Union[str, Any] = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) UpperCamelCase__ :List[Any] = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp UpperCamelCase__ :Any = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) UpperCamelCase__ :int = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch UpperCamelCase__ :Dict = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) UpperCamelCase__ :Union[str, Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf UpperCamelCase__ :Dict = tf.stack([encoding['''input_ids'''] for encoding 
in encodings] , axis=0 ) UpperCamelCase__ :List[str] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) UpperCamelCase__ :Optional[int] = BatchEncoding() UpperCamelCase__ :Optional[Any] = input_ids UpperCamelCase__ :Dict = attention_mask if query_images is not None: UpperCamelCase__ :int = BatchEncoding() UpperCamelCase__ :Optional[Any] = self.image_processor( UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ).pixel_values UpperCamelCase__ :Tuple = query_pixel_values if images is not None: UpperCamelCase__ :List[Any] = self.image_processor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if text is not None and images is not None: UpperCamelCase__ :List[str] = image_features.pixel_values return encoding elif query_images is not None and images is not None: UpperCamelCase__ :Any = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ ) def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' return self.image_processor.post_process(*UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' return self.image_processor.post_process_object_detection(*UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ): '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase__ , ) return self.image_processor_class @property def lowerCAmelCase__ ( self ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase__ , ) return self.image_processor
97
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : str = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, 
FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys a__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
54
0
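An aside on the OwlViT processor record above: the core of its batching logic is padding every sample's query list to the batch-wide maximum before tokenization, so all samples carry the same number of queries. A framework-free sketch of just that step, using the same single-space pad string as the snippet:

def pad_text_queries(batch: list[list[str]]) -> list[list[str]]:
    # Every sample in the batch must carry the same number of queries,
    # so shorter samples are padded with a single-space placeholder.
    max_num_queries = max(len(queries) for queries in batch)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batch]

print(pad_text_queries([["a photo of a cat"], ["a dog", "a bird", "a fish"]]))
# [['a photo of a cat', ' ', ' '], ['a dog', 'a bird', 'a fish']]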
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy __A : str = logging.get_logger(__name__) __A : int = { 'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json', } __A : Tuple = { 'artists_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json', }, 'genres_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json', }, 'lyrics_file': { 'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json', }, } __A : int = { 'jukebox': 5_12, } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Any = VOCAB_FILES_NAMES _UpperCamelCase:int = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase:Optional[int] = PRETRAINED_LYRIC_TOKENS_SIZES _UpperCamelCase:Optional[Any] = ["input_ids", "attention_mask"] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=["v3", "v2", "v2"] , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE="<|endoftext|>" , **_SCREAMING_SNAKE_CASE , )-> Optional[int]: lowerCamelCase_ =AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token super().__init__( unk_token=_SCREAMING_SNAKE_CASE , n_genres=_SCREAMING_SNAKE_CASE , version=_SCREAMING_SNAKE_CASE , max_n_lyric_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) lowerCamelCase_ =version lowerCamelCase_ =max_n_lyric_tokens lowerCamelCase_ =n_genres with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: lowerCamelCase_ =oov.replace(R"""\-'""" , R"""\-+'""" ) lowerCamelCase_ =regex.compile(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={v: k for k, v in self.artists_encoder.items()} lowerCamelCase_ ={v: k for k, v in self.genres_encoder.items()} lowerCamelCase_ ={v: k for k, v in self.lyrics_encoder.items()} @property def _snake_case ( self )-> Union[str, Any]: return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def _snake_case ( self )-> int: return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =[self.artists_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for artist in list_artists] for genres in range(len(_SCREAMING_SNAKE_CASE ) ): lowerCamelCase_ =[self.genres_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for genre in list_genres[genres]] lowerCamelCase_ =list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) lowerCamelCase_ =[[self.lyrics_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]: return list(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =self.prepare_for_tokenization(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =self._tokenize(_SCREAMING_SNAKE_CASE ) return artist, genre, lyrics def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )-> Tuple[str, str, str, Dict[str, Any]]: for idx in range(len(self.version ) ): if self.version[idx] == "v3": lowerCamelCase_ =artists[idx].lower() lowerCamelCase_ =[genres[idx].lower()] else: lowerCamelCase_ =self._normalize(artists[idx] ) + """.v2""" lowerCamelCase_ =[ self._normalize(_SCREAMING_SNAKE_CASE ) + """.v2""" for genre in genres[idx].split("""_""" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": lowerCamelCase_ =regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" ) lowerCamelCase_ ="""ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n""" lowerCamelCase_ ={vocab[index]: index + 1 for index in range(len(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase_ =0 lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) + 1 lowerCamelCase_ =self.vocab lowerCamelCase_ ={v: k for k, v in self.vocab.items()} lowerCamelCase_ ="""""" else: lowerCamelCase_ =regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" ) lowerCamelCase_ =self._run_strip_accents(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =lyrics.replace("""\\""" , """\n""" ) lowerCamelCase_ =self.out_of_vocab.sub("""""" , _SCREAMING_SNAKE_CASE ), [], [] return artists, genres, lyrics def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple: lowerCamelCase_ =unicodedata.normalize("""NFD""" , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[] for char in text: lowerCamelCase_ =unicodedata.category(_SCREAMING_SNAKE_CASE ) if cat == "Mn": continue output.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str: lowerCamelCase_ =( [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )] + [chr(_SCREAMING_SNAKE_CASE ) for i in 
range(ord("""A""" ) , ord("""Z""" ) + 1 )] + [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )] + ["""."""] ) lowerCamelCase_ =frozenset(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =re.compile(R"""_+""" ) lowerCamelCase_ ="""""".join([c if c in accepted else """_""" for c in text.lower()] ) lowerCamelCase_ =pattern.sub("""_""" , _SCREAMING_SNAKE_CASE ).strip("""_""" ) return text def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> str: return " ".join(_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )-> Union[str, Any]: # Convert to TensorType if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =TensorType(_SCREAMING_SNAKE_CASE ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" ) import tensorflow as tf lowerCamelCase_ =tf.constant lowerCamelCase_ =tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" ) import torch lowerCamelCase_ =torch.tensor lowerCamelCase_ =torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" ) import jax.numpy as jnp # noqa: F811 lowerCamelCase_ =jnp.array lowerCamelCase_ =_is_jax else: lowerCamelCase_ =np.asarray lowerCamelCase_ =_is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: lowerCamelCase_ =[inputs] if not is_tensor(_SCREAMING_SNAKE_CASE ): lowerCamelCase_ =as_tensor(_SCREAMING_SNAKE_CASE ) except: # noqa E722 raise ValueError( """Unable to create tensor, you should probably activate truncation and/or padding """ """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" ) return inputs def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE="pt" )-> BatchEncoding: lowerCamelCase_ =[0, 0, 0] lowerCamelCase_ =[artist] * len(self.version ) lowerCamelCase_ =[genres] * len(self.version ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =self.tokenize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ =self._convert_token_to_id(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[-INFINITY] * len(full_tokens[-1] ) lowerCamelCase_ =[ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_SCREAMING_SNAKE_CASE ) for i in range(len(self.version ) ) ] return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase_ =os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase_ =os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) ) return (artists_file, genres_file, lyrics_file) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[str]: lowerCamelCase_ =self.artists_decoder.get(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =[self.genres_decoder.get(_SCREAMING_SNAKE_CASE ) for genre in genres_index] lowerCamelCase_ =[self.lyrics_decoder.get(_SCREAMING_SNAKE_CASE ) for character in lyric_index] return artist, genres, lyrics
351
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' __A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' __A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _SCREAMING_SNAKE_CASE ( datasets.Metric): def _snake_case ( self )-> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[ """https://arxiv.org/abs/2102.01454""", """https://github.com/krishnap25/mauve""", ] , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="auto" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=500 , _SCREAMING_SNAKE_CASE="gpt2-large" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=25 , )-> List[str]: lowerCamelCase_ =compute_mauve( p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , ) return out
49
0
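An aside on the Jukebox tokenizer record above: its accent-stripping helper is the standard unicodedata recipe of decomposing text to NFD and dropping combining marks (category "Mn"). A standalone sketch:

import unicodedata

def strip_accents(text: str) -> str:
    # NFD splits each accented character into a base character plus
    # combining marks, which are then filtered out.
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

assert strip_accents("Beyoncé Motörhead") == "Beyonce Motorhead"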
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowercase ( _lowercase , unittest.TestCase ): lowerCamelCase : str = XLMTokenizer lowerCamelCase : Optional[Any] = False def UpperCAmelCase__ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase_ : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowerCamelCase_ : List[str] = dict(zip(A , range(len(A ) ) ) ) lowerCamelCase_ : Dict = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowerCamelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(A ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(A ) ) def UpperCAmelCase__ (self , A ): lowerCamelCase_ : Tuple = '''lower newer''' lowerCamelCase_ : int = '''lower newer''' return input_text, output_text def UpperCAmelCase__ (self ): lowerCamelCase_ : int = XLMTokenizer(self.vocab_file , self.merges_file ) lowerCamelCase_ : int = '''lower''' lowerCamelCase_ : List[Any] = ['''low''', '''er</w>'''] lowerCamelCase_ : Dict = tokenizer.tokenize(A ) self.assertListEqual(A , A ) lowerCamelCase_ : List[Any] = tokens + ['''<unk>'''] lowerCamelCase_ : int = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) @slow def UpperCAmelCase__ (self ): lowerCamelCase_ : Union[str, Any] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) lowerCamelCase_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=A ) lowerCamelCase_ : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A ) lowerCamelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(A ) lowerCamelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
318
'''simple docstring''' from __future__ import annotations import time __lowercase : List[Any] = list[tuple[int, int]] __lowercase : List[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class __lowercase : def __init__(self , A , A , A , A , A ): lowerCamelCase_ : Optional[int] = pos_x lowerCamelCase_ : List[str] = pos_y lowerCamelCase_ : List[Any] = (pos_y, pos_x) lowerCamelCase_ : List[str] = goal_x lowerCamelCase_ : Union[str, Any] = goal_y lowerCamelCase_ : int = parent class __lowercase : def __init__(self , A , A ): lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A ) lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A ) lowerCamelCase_ : Union[str, Any] = [self.start] lowerCamelCase_ : List[str] = False def UpperCAmelCase__ (self ): while self.node_queue: lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: lowerCamelCase_ : List[str] = True return self.retrace_path(A ) lowerCamelCase_ : str = self.get_successors(A ) for node in successors: self.node_queue.append(A ) if not self.reached: return [self.start.pos] return None def UpperCAmelCase__ (self , A ): lowerCamelCase_ : Dict = [] for action in delta: lowerCamelCase_ : Any = parent.pos_x + action[1] lowerCamelCase_ : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(A , A , self.target.pos_y , self.target.pos_x , A ) ) return successors def UpperCAmelCase__ (self , A ): lowerCamelCase_ : int = node lowerCamelCase_ : str = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCamelCase_ : List[Any] = current_node.parent path.reverse() return path class __lowercase : def __init__(self , A , A ): lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A ) lowerCamelCase_ : Any = BreadthFirstSearch(A , A ) lowerCamelCase_ : Union[str, Any] = False def UpperCAmelCase__ (self ): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 ) lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: lowerCamelCase_ : Optional[Any] = True return self.retrace_bidirectional_path( A , A ) lowerCamelCase_ : Optional[int] = current_bwd_node lowerCamelCase_ : List[str] = current_fwd_node lowerCamelCase_ : List[str] = { self.fwd_bfs: self.fwd_bfs.get_successors(A ), self.bwd_bfs: self.bwd_bfs.get_successors(A ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(A ) if not self.reached: return [self.fwd_bfs.start.pos] return None def UpperCAmelCase__ (self , A , A ): lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A ) lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A ) bwd_path.pop() bwd_path.reverse() lowerCamelCase_ : Dict = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() __lowercase : List[str] = (0, 0) __lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __lowercase : Tuple = time.time() __lowercase : int = BreadthFirstSearch(init, goal) __lowercase : Dict = bfs.search() 
__lowercase : Dict = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) __lowercase : int = time.time() __lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal) __lowercase : Any = bd_bfs.search() __lowercase : Dict = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
318
1
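An aside on the BFS record above: it pops from the front of a Python list, which costs O(n) per dequeue; collections.deque gives the same traversal order in O(1) per step. A minimal sketch with parent-pointer path retracing on the same kind of 0/1 grid (0 free, 1 blocked):

from collections import deque

def bfs_grid(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]):
    # The parent map doubles as the visited set.
    parent = {start: None}
    queue = deque([start])
    while queue:
        y, x = queue.popleft()
        if (y, x) == goal:
            path, node = [], (y, x)
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if (0 <= ny < len(grid) and 0 <= nx < len(grid[0])
                    and grid[ny][nx] == 0 and (ny, nx) not in parent):
                parent[(ny, nx)] = (y, x)
                queue.append((ny, nx))
    return None  # goal unreachable

print(bfs_grid([[0, 0], [1, 0]], (0, 0), (1, 1)))  # [(0, 0), (0, 1), (1, 1)]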
'''simple docstring''' from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
354
'''simple docstring''' from __future__ import annotations def prime_factors(n: int) -> list[int]: '''simple docstring''' i = 2 factors: list[int] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i) if n > 1: factors.append(n) return factors if __name__ == "__main__": import doctest doctest.testmod()
311
0
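An aside on the factorization record above: a quick property test for trial division is that the product of the returned factors must reconstruct the input. A sketch, restating the factorizer so the check is self-contained:

from math import prod

def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

for n in (2, 12, 97, 360, 2 * 3 * 5 * 7 * 11):
    fs = prime_factors(n)
    assert prod(fs) == n, (n, fs)
print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]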
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __snake_case =logging.get_logger("""transformers.models.encodec""") __snake_case ={ """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""", """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""", """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""", """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""", } __snake_case ={ """encoder.model.0.conv.conv""": """encoder.layers.0.conv""", """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""", """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""", """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""", """encoder.model.3.conv.conv""": """encoder.layers.3.conv""", """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""", """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""", """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""", """encoder.model.6.conv.conv""": """encoder.layers.6.conv""", """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""", """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""", """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""", """encoder.model.9.conv.conv""": """encoder.layers.9.conv""", """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""", """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""", """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""", """encoder.model.12.conv.conv""": """encoder.layers.12.conv""", """encoder.model.13.lstm""": """encoder.layers.13.lstm""", """encoder.model.15.conv.conv""": """encoder.layers.15.conv""", } __snake_case ={ """encoder.model.0.conv.norm""": """encoder.layers.0.norm""", """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""", """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""", """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""", """encoder.model.3.conv.norm""": """encoder.layers.3.norm""", """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""", """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""", """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""", """encoder.model.6.conv.norm""": """encoder.layers.6.norm""", """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""", """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""", """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""", """encoder.model.9.conv.norm""": """encoder.layers.9.norm""", """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""", """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""", """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""", """encoder.model.12.conv.norm""": 
"""encoder.layers.12.norm""", """encoder.model.15.conv.norm""": """encoder.layers.15.norm""", } __snake_case ={ """decoder.model.0.conv.conv""": """decoder.layers.0.conv""", """decoder.model.1.lstm""": """decoder.layers.1.lstm""", """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""", """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""", """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""", """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""", """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""", """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""", """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""", """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""", """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""", """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""", """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""", """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""", """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""", """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""", """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""", """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""", """decoder.model.15.conv.conv""": """decoder.layers.15.conv""", } __snake_case ={ """decoder.model.0.conv.norm""": """decoder.layers.0.norm""", """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""", """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""", """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""", """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""", """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""", """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""", """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""", """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""", """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""", """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""", """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""", """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""", """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""", """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""", """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""", """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""", """decoder.model.15.conv.norm""": """decoder.layers.15.norm""", } __snake_case ={ **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __snake_case ={ **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __snake_case =[] __snake_case =[] def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ): for attribute in key.split('.' 
): lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) if weight_type is not None: lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape else: lowerCAmelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCAmelCase = value elif weight_type == "weight_g": lowerCAmelCase = value elif weight_type == "weight_v": lowerCAmelCase = value elif weight_type == "bias": lowerCAmelCase = value elif weight_type == "running_mean": lowerCAmelCase = value elif weight_type == "running_var": lowerCAmelCase = value elif weight_type == "num_batches_tracked": lowerCAmelCase = value elif weight_type == "weight_ih_l0": lowerCAmelCase = value elif weight_type == "weight_hh_l0": lowerCAmelCase = value elif weight_type == "bias_ih_l0": lowerCAmelCase = value elif weight_type == "bias_hh_l0": lowerCAmelCase = value elif weight_type == "weight_ih_l1": lowerCAmelCase = value elif weight_type == "weight_hh_l1": lowerCAmelCase = value elif weight_type == "bias_ih_l1": lowerCAmelCase = value elif weight_type == "bias_hh_l1": lowerCAmelCase = value else: lowerCAmelCase = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ): for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCAmelCase , lowerCAmelCase = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ): lowerCAmelCase = [] if model_name == "encodec_24khz" or "encodec_32khz": lowerCAmelCase = MAPPING_24K elif model_name == "encodec_48khz": lowerCAmelCase = MAPPING_48K else: raise ValueError(f'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase , lowerCamelCase ): logger.info(f'''{name} was ignored''' ) continue lowerCAmelCase = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCAmelCase , lowerCAmelCase = key.split('.*.' ) if prefix in name and suffix in name: lowerCAmelCase = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue lowerCAmelCase = True if "*" in mapped_key: lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' 
)[-2] lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase ) if "weight_g" in name: lowerCAmelCase = 'weight_g' elif "weight_v" in name: lowerCAmelCase = 'weight_v' elif "weight_ih_l0" in name: lowerCAmelCase = 'weight_ih_l0' elif "weight_hh_l0" in name: lowerCAmelCase = 'weight_hh_l0' elif "bias_ih_l0" in name: lowerCAmelCase = 'bias_ih_l0' elif "bias_hh_l0" in name: lowerCAmelCase = 'bias_hh_l0' elif "weight_ih_l1" in name: lowerCAmelCase = 'weight_ih_l1' elif "weight_hh_l1" in name: lowerCAmelCase = 'weight_hh_l1' elif "bias_ih_l1" in name: lowerCAmelCase = 'bias_ih_l1' elif "bias_hh_l1" in name: lowerCAmelCase = 'bias_hh_l1' elif "bias" in name: lowerCAmelCase = 'bias' elif "weight" in name: lowerCAmelCase = 'weight' elif "running_mean" in name: lowerCAmelCase = 'running_mean' elif "running_var" in name: lowerCAmelCase = 'running_var' elif "num_batches_tracked" in name: lowerCAmelCase = 'num_batches_tracked' else: lowerCAmelCase = None set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) continue if not is_used: unused_weights.append(lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) @torch.no_grad() def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ): if config_path is not None: lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase ) else: lowerCAmelCase = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCAmelCase = [8, 5, 4, 4] lowerCAmelCase = [2.2] lowerCAmelCase = 64 lowerCAmelCase = 32000 lowerCAmelCase = 2048 lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False elif model_name == "encodec_48khz": lowerCAmelCase = [8, 5, 4, 2] lowerCAmelCase = [3.0, 6.0, 12.0, 24.0] lowerCAmelCase = 48000 lowerCAmelCase = 2 lowerCAmelCase = False lowerCAmelCase = 'time_group_norm' lowerCAmelCase = True lowerCAmelCase = 1.0 lowerCAmelCase = 0.01 else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase = EncodecModel(lowerCamelCase ) lowerCAmelCase = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase ) lowerCAmelCase = torch.load(lowerCamelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCAmelCase = original_checkpoint['best_state'] recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase ) model.save_pretrained(lowerCamelCase ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(lowerCamelCase ) model.push_to_hub(lowerCamelCase ) if __name__ == "__main__": __snake_case =argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. 
Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) __snake_case =parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
4
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[int]]) -> bool: '''simple docstring''' __UpperCamelCase : Any = len(_lowerCamelCase) # We need to create solution object to save path. __UpperCamelCase : List[str] = [[0 for _ in range(_lowerCamelCase)] for _ in range(_lowerCamelCase)] __UpperCamelCase : Optional[int] = run_maze(_lowerCamelCase , 0 , 0 , _lowerCamelCase) if solved: print("\n".join(str(_lowerCamelCase) for row in solutions)) else: print("No solution exists!") return solved def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[int]] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]]) -> bool: '''simple docstring''' __UpperCamelCase : Tuple = len(_lowerCamelCase) # Final check point. if i == j == (size - 1): __UpperCamelCase : Optional[int] = 1 return True __UpperCamelCase : List[Any] = (not i < 0) and (not j < 0) # Check lower bounds __UpperCamelCase : List[str] = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. __UpperCamelCase : int = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited __UpperCamelCase : Tuple = 1 # check for directions if ( run_maze(_lowerCamelCase , i + 1 , _lowerCamelCase , _lowerCamelCase) or run_maze(_lowerCamelCase , _lowerCamelCase , j + 1 , _lowerCamelCase) or run_maze(_lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase) or run_maze(_lowerCamelCase , _lowerCamelCase , j - 1 , _lowerCamelCase) ): return True __UpperCamelCase : Tuple = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
232
0
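An aside on the EnCodec conversion record above: its MAPPING keys are matched with a small wildcard convention (a ".*." infix, a trailing ".*", or a plain substring), mirrored in both should_ignore and recursively_load_weights. A sketch of that matcher in isolation, with a name of our choosing:

def matches_pattern(key: str, name: str) -> bool:
    # A trailing ".*" matches any key sharing the prefix; a ".*." infix
    # matches when both halves occur in the name; otherwise plain substring.
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert matches_pattern("quantizer.vq.layers.*._codebook.embed", "quantizer.vq.layers.0._codebook.embed")
assert matches_pattern("decoder.*", "decoder.model.0.conv.conv")
assert not matches_pattern("encoder.*", "decoder.model.0.conv.conv")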
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1024 ) -> Union[str, Any]: """simple docstring""" snake_case__ , snake_case__ : List[str] = [], [] snake_case__ : List[str] = list(zip(__lowerCAmelCase , __lowerCAmelCase ) ) snake_case__ , snake_case__ : int = sorted_examples[0] def is_too_big(__lowerCAmelCase ): return tok(__lowerCAmelCase , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): snake_case__ : Any = new_src + ''' ''' + src snake_case__ : Optional[Any] = new_tgt + ''' ''' + tgt if is_too_big(__lowerCAmelCase ) or is_too_big(__lowerCAmelCase ): # cant fit, finalize example finished_src.append(__lowerCAmelCase ) finished_tgt.append(__lowerCAmelCase ) snake_case__ , snake_case__ : Tuple = src, tgt else: # can fit, keep adding snake_case__ , snake_case__ : Dict = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(__lowerCAmelCase ) finished_tgt.append(__lowerCAmelCase ) return finished_src, finished_tgt def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: """simple docstring""" snake_case__ : Tuple = Path(__lowerCAmelCase ) save_path.mkdir(exist_ok=__lowerCAmelCase ) for split in ["train"]: snake_case__ , snake_case__ : Tuple = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" snake_case__ : int = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()] snake_case__ : List[Any] = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()] snake_case__ , snake_case__ : Tuple = pack_examples(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) print(f"""packed {split} split from {len(__lowerCAmelCase )} examples -> {len(__lowerCAmelCase )}.""" ) Path(save_path / f"""{split}.source""" ).open('''w''' ).write('''\n'''.join(__lowerCAmelCase ) ) Path(save_path / f"""{split}.target""" ).open('''w''' ).write('''\n'''.join(__lowerCAmelCase ) ) for split in ["val", "test"]: snake_case__ , snake_case__ : Tuple = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(__lowerCAmelCase , save_path / f"""{split}.source""" ) shutil.copyfile(__lowerCAmelCase , save_path / f"""{split}.target""" ) def _lowerCAmelCase ( ) -> List[str]: """simple docstring""" snake_case__ : Dict = argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=__lowerCAmelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=__lowerCAmelCase , default=128 ) parser.add_argument('''--data_dir''' , type=__lowerCAmelCase ) parser.add_argument('''--save_path''' , type=__lowerCAmelCase ) snake_case__ : Dict = parser.parse_args() snake_case__ : Any = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(__lowerCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
359
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class a : def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,): snake_case__ : Optional[int] = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Optional[Any] = seq_length snake_case__ : Tuple = is_training snake_case__ : Optional[Any] = use_input_mask snake_case__ : List[Any] = use_token_type_ids snake_case__ : str = use_labels snake_case__ : List[Any] = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = intermediate_multiple_size snake_case__ : Tuple = hidden_act snake_case__ : Optional[Any] = hidden_dropout snake_case__ : str = attention_dropout snake_case__ : List[str] = weight_tying snake_case__ : Optional[Any] = max_position_embeddings snake_case__ : Optional[int] = type_vocab_size snake_case__ : str = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : int = num_labels snake_case__ : int = num_choices snake_case__ : int = scope def __lowerCamelCase ( self :List[str] ): snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ : str = None if self.use_input_mask: snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) snake_case__ : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :int ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,) def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs() snake_case__ : Union[str, Any] = True return config, input_ids, input_mask, token_labels def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ): snake_case__ : Union[str, Any] = 
GPTNeoXJapaneseModel(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase ) snake_case__ : Optional[Any] = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ): snake_case__ : Any = True snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : str = model(__lowercase ,attention_mask=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ): snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ): snake_case__ : Optional[int] = True snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() # first forward pass snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase ) snake_case__ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 ) snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 ) snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase ) snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0] snake_case__ : List[str] = model( __lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0] # select random slice snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item() snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) ) def __lowerCamelCase ( self :Dict ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () __lowerCAmelCase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () __lowerCAmelCase : int = ( {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) __lowerCAmelCase : 
List[Any] = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : str = False def __lowerCamelCase ( self :Any ): snake_case__ : int = GPTNeoXJapaneseModelTester(self ) snake_case__ : Any = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 ) def __lowerCamelCase ( self :Any ): self.config_tester.run_common_tests() def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): # This regression test was failing with PyTorch < 1.3 snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case__ : List[str] = None self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase ,__lowercase ,__lowercase ) def __lowerCamelCase ( self :str ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__lowercase ) @slow def __lowerCamelCase ( self :Dict ): snake_case__ : str = '''abeja/gpt-neox-japanese-2.7b''' snake_case__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、'''] snake_case__ : Optional[int] = [ '''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''', '''100年後に必要とされる会社は、「人」が中心の会社です。''', '''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''', '''国境の長いトンネルを抜けると、そこは雪国だった。''', '''美味しい日本食といえば、やっぱりお寿司ですよね。''', ] snake_case__ : Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowercase ) snake_case__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowercase ) snake_case__ : Optional[int] = [] for prompt in prompts: snake_case__ : Dict = tokenizer(__lowercase ,return_tensors='''pt''' ).input_ids snake_case__ : Union[str, Any] = model.generate(__lowercase ,max_length=5_0 ) snake_case__ : int = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase ) predicted_outputs += generated_string self.assertListEqual(__lowercase ,__lowercase )
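For quick reference outside the test harness, a minimal sketch of the generation flow the slow test above exercises; the checkpoint id and prompt are taken from the test itself, and it is assumed that downloading the 2.7B checkpoint is practical:

from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

# Checkpoint and prompt come from the slow integration test above.
tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")

input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=50)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])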
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase : Optional[int] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' lowercase : Union[str, Any] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' lowercase : Any = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowerCamelCase__ ( datasets.Metric): '''simple docstring''' def _lowerCamelCase ( self :Tuple ) -> Union[str, Any]: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def _lowerCamelCase ( self :str , a :Union[str, Any] , a :Dict , a :bool = False , a :bool = False , a :bool = False , a :bool = False , ) -> Dict: __UpperCamelCase : str = len(references[0] ) if any(len(a ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __UpperCamelCase : Dict = [[refs[i] for refs in references] for i in range(a )] __UpperCamelCase : Optional[Any] = TER( normalized=a , no_punct=a , asian_support=a , case_sensitive=a , ) __UpperCamelCase : int = sb_ter.corpus_score(a , a ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
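A standalone usage sketch, lifted directly from the metric's own docstring examples above:

import datasets

ter = datasets.load_metric("ter")
predictions = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
print(ter.compute(predictions=predictions, references=references, case_sensitive=True))
# {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}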
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level`` (-255..255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Push each channel value away from (or towards) the midpoint 128.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
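A self-contained sanity check of the contrast mapping; the solid-colour demo image and the chosen level are my own illustration, not part of the original script:

from PIL import Image

demo = Image.new("RGB", (4, 4), color=(100, 150, 130))
# With level=100 the factor is ~2.27, so values are pushed away from the midpoint 128.
print(change_contrast(demo, 100).getpixel((0, 0)))  # (64, 177, 132)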
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ): __a : Union[str, Any] = parent __a : List[str] = batch_size __a : List[str] = seq_length __a : Union[str, Any] = is_training __a : Optional[int] = use_input_mask __a : Union[str, Any] = use_token_type_ids __a : List[Any] = use_labels __a : str = vocab_size __a : Dict = hidden_size __a : List[str] = num_hidden_layers __a : List[str] = num_attention_heads __a : Tuple = intermediate_size __a : str = hidden_act __a : List[Any] = hidden_dropout_prob __a : Union[str, Any] = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : Dict = type_vocab_size __a : Dict = type_sequence_label_size __a : List[str] = initializer_range __a : int = num_labels __a : Optional[int] = num_choices __a : Optional[int] = scope def _lowerCamelCase ( self ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def _lowerCamelCase ( self ): __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = None if self.use_input_mask: __a : str = random_attention_mask([self.batch_size, self.seq_length] ) __a : Any = None __a : Any = None __a : Any = None if self.use_labels: __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = MPNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : int = model(_UpperCAmelCase , _UpperCAmelCase ) __a : Any = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , 
(self.batch_size, self.hidden_size) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Union[str, Any] = MPNetForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Tuple = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = self.num_labels __a : Optional[int] = MPNetForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Tuple = self.num_choices __a : Union[str, Any] = MPNetForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Tuple = self.num_labels __a : str = MPNetForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self ): __a : Optional[Any] = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) __lowerCAmelCase = ( { '''feature-extraction''': MPNetModel, '''fill-mask''': MPNetForMaskedLM, '''question-answering''': MPNetForQuestionAnswering, '''text-classification''': MPNetForSequenceClassification, '''token-classification''': MPNetForTokenClassification, '''zero-shot''': MPNetForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = True def _lowerCamelCase ( self ): __a : str = MPNetModelTester(self ) __a : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _lowerCamelCase ( self ): self.config_tester.run_common_tests() def _lowerCamelCase ( self ): __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*_UpperCAmelCase ) def 
_lowerCamelCase ( self ): __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*_UpperCAmelCase ) @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCamelCase ( self ): __a : Optional[int] = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) __a : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __a : List[Any] = model(_UpperCAmelCase )[0] __a : str = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _UpperCAmelCase ) __a : Tuple = torch.tensor( [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
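A minimal sketch of the same integration check run as a standalone script; the model id, input ids, and expected shape all come from the test above:

import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768])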
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = '''openai/whisper-base''' __lowerCAmelCase = ( '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the ''' '''transcribed text.''' ) __lowerCAmelCase = '''transcriber''' __lowerCAmelCase = WhisperProcessor __lowerCAmelCase = WhisperForConditionalGeneration __lowerCAmelCase = ['''audio'''] __lowerCAmelCase = ['''text'''] def _lowerCamelCase ( self , _UpperCAmelCase ): return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , _UpperCAmelCase ): return self.model.generate(inputs=_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase ): return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0]
"""simple docstring""" import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _a : str= logging.get_logger(__name__) def __UpperCAmelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) -> List[str]: '''simple docstring''' __snake_case : Union[str, Any] = set() __snake_case : List[str] = [] def parse_line(UpperCAmelCase_ : Tuple ): for line in fp: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __snake_case : str = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(UpperCAmelCase_ ) > 0: __snake_case : Optional[Any] = '\n'.join(UpperCAmelCase_ ) # Only keep the warnings specified in `targets` if any(F": {x}: " in warning for x in targets ): selected_warnings.add(UpperCAmelCase_ ) buffer.clear() continue else: __snake_case : Dict = line.strip() buffer.append(UpperCAmelCase_ ) if from_gh: for filename in os.listdir(UpperCAmelCase_ ): __snake_case : Tuple = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if not os.path.isdir(UpperCAmelCase_ ): # read the file if filename != "warnings.txt": continue with open(UpperCAmelCase_ ) as fp: parse_line(UpperCAmelCase_ ) else: try: with zipfile.ZipFile(UpperCAmelCase_ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCAmelCase_ ): # read the file if filename != "warnings.txt": continue with z.open(UpperCAmelCase_ ) as fp: parse_line(UpperCAmelCase_ ) except Exception: logger.warning( F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) -> Dict: '''simple docstring''' __snake_case : Optional[Any] = set() __snake_case : Tuple = [os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) for p in os.listdir(UpperCAmelCase_ ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(UpperCAmelCase_ , UpperCAmelCase_ ) ) return selected_warnings if __name__ == "__main__": def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> str: '''simple docstring''' return values.split(',' ) _a : Optional[int]= argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) _a : Tuple= parser.parse_args() _a : Tuple= args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _a : int= get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, 
ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _a : Dict= extract_warnings(args.output_dir, args.targets) _a : int= sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
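A hedged sketch of reusing the extraction helper outside the CLI entry point; note the helpers read the module-global `from_gh` flag set in `__main__`, and the artifacts directory name here is hypothetical:

targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
for warning in sorted(extract_warnings("downloaded_artifacts", targets)):
    print(warning)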
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : int= logging.get_logger(__name__) _a : Optional[Any]= { "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class UpperCamelCase ( lowercase ): UpperCAmelCase : List[Any] = """lilt""" def __init__(self : Dict , _A : Any=3_05_22 , _A : Union[str, Any]=7_68 , _A : Any=12 , _A : Tuple=12 , _A : Optional[int]=30_72 , _A : Tuple="gelu" , _A : str=0.1 , _A : List[Any]=0.1 , _A : Union[str, Any]=5_12 , _A : Any=2 , _A : Tuple=0.02 , _A : List[str]=1E-12 , _A : Optional[int]=0 , _A : Optional[Any]="absolute" , _A : Any=None , _A : List[Any]=4 , _A : Optional[int]=10_24 , **_A : Union[str, Any] , ) -> Tuple: super().__init__(pad_token_id=_A , **_A) __snake_case : Optional[int] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[int] = hidden_act __snake_case : List[str] = intermediate_size __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : List[Any] = initializer_range __snake_case : Optional[Any] = layer_norm_eps __snake_case : Optional[int] = position_embedding_type __snake_case : Any = classifier_dropout __snake_case : Optional[int] = channel_shrink_ratio __snake_case : Tuple = max_ad_position_embeddings
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of a 0/1 grid,
    moving one step up, down, left or right and never entering blocked (1) cells."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
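A quick illustrative call (the grid is my own example): on a fully open 2x2 grid there are exactly two simple paths from the top-left to the bottom-right corner:

grid = [[0, 0], [0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2: right-then-down and down-then-right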
"""simple docstring""" from typing import Any def lowercase__ ( lowercase_ ) -> list[Any]: """simple docstring""" if not input_list: return [] _UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list] _UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""} class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """ctrl""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase=246534 , lowercase=256 , lowercase=1280 , lowercase=8192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1E-6 , lowercase=0.02 , lowercase=True , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : Dict = n_positions _lowerCamelCase : Optional[int] = n_embd _lowerCamelCase : str = n_layer _lowerCamelCase : Union[str, Any] = n_head _lowerCamelCase : Any = dff _lowerCamelCase : int = resid_pdrop _lowerCamelCase : Dict = embd_pdrop _lowerCamelCase : Union[str, Any] = layer_norm_epsilon _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : str = use_cache super().__init__(**lowercase )
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class A_ ( unittest.TestCase ): def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=1_3 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : List[Any]=9_9 ,SCREAMING_SNAKE_CASE__ : List[Any]=3_2 ,SCREAMING_SNAKE_CASE__ : int=5 ,SCREAMING_SNAKE_CASE__ : List[Any]=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Dict=1_6 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,): __lowerCamelCase : int = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : Union[str, Any] = seq_length __lowerCamelCase : List[Any] = is_training __lowerCamelCase : Tuple = use_attention_mask __lowerCamelCase : List[str] = use_token_type_ids __lowerCamelCase : Any = use_labels __lowerCamelCase : List[str] = vocab_size __lowerCamelCase : Any = hidden_size __lowerCamelCase : Tuple = num_hidden_layers __lowerCamelCase : Union[str, Any] = num_attention_heads __lowerCamelCase : Union[str, Any] = intermediate_size __lowerCamelCase : List[Any] = hidden_act __lowerCamelCase : int = hidden_dropout_prob __lowerCamelCase : int = attention_probs_dropout_prob __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : Union[str, Any] = type_vocab_size __lowerCamelCase : List[str] = type_sequence_label_size __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : Optional[int] = num_choices def lowerCAmelCase ( self : Union[str, Any]): __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) __lowerCamelCase : Union[str, Any] = None if self.use_attention_mask: __lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length]) __lowerCamelCase : str = DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=SCREAMING_SNAKE_CASE__ ,) return config, input_ids, attention_mask def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : List[str] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = config_and_inputs __lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Dict = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, 
FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = FlaxDistilBertModelTester(self) @slow def lowerCAmelCase ( self : int): for model_class_name in self.all_model_classes: __lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased') __lowerCamelCase : List[str] = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) @require_flax class A_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self : str): __lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased') __lowerCamelCase : str = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]]) __lowerCamelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)[0] __lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8) self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
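The Flax counterpart of the integration check above, run standalone; model id, inputs, and expected shape all come from the test itself:

import numpy as np
from transformers import FlaxDistilBertModel

model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # (1, 11, 768)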
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def snake_case_ (UpperCamelCase : str ): '''simple docstring''' _a = filter(lambda UpperCamelCase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params _snake_case : Any = logging.getLogger(__name__) def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : List[str] ): '''simple docstring''' if metric == "rouge2": _a = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": _a = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": _a = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": _a = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ''' function.''' ) _a = ModelCheckpoint( dirpath=UpperCamelCase , filename=UpperCamelCase , monitor=f'val_{metric}' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def snake_case_ (UpperCamelCase : int , UpperCamelCase : str ): '''simple docstring''' return EarlyStopping( monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=UpperCamelCase , verbose=UpperCamelCase , ) class A ( pl.Callback ): def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int ) -> Any: """simple docstring""" _a = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCAmelCase_ ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : str=True ) -> None: """simple docstring""" logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / '''test_results.txt''' _a = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / F'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''a+''' ) as writer: for key in sorted(lowerCAmelCase_ ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(lowerCAmelCase_ , torch.Tensor ): _a = val.item() _a = F'{key}: {val:.6f}\n' writer.write(lowerCAmelCase_ ) if not save_generations: return if "preds" in metrics: _a = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(lowerCAmelCase_ ) @rank_zero_only def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Optional[Any]: """simple docstring""" try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(lowerCAmelCase_ ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> List[Any]: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , '''test''' ) @rank_zero_only def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
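A hedged sketch of wiring these pieces into a trainer; the readable helper names (get_checkpoint_callback, get_early_stopping_callback, Seq2SeqLoggingCallback) are assumed aliases for the two factories and the callback class defined above, and the output directory and patience values are hypothetical:

checkpoint_cb = get_checkpoint_callback("output_dir", metric="rouge2")
early_stopping_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stopping_cb])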
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def snake_case_ (UpperCamelCase : Namespace ): '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _snake_case : List[Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class A ( _a ): @staticmethod def __lowerCAmelCase ( lowerCAmelCase_ : ArgumentParser ) -> Any: """simple docstring""" _a = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=lowerCAmelCase_ , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , *lowerCAmelCase_ : Optional[Any] , ) -> List[str]: """simple docstring""" _a = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(F'Loading model {model_type}' ) _a = model_type _a = tf_checkpoint _a = pytorch_dump_output _a = config _a = finetuning_task_name def __lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) if "ckpt" in self._tf_checkpoint.lower(): _a = self._tf_checkpoint _a = '''''' else: _a = self._tf_checkpoint _a = '''''' convert_transfo_xl_checkpoint_to_pytorch( lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() _snake_case : Tuple = logging.get_logger(__name__) def a_ ( lowerCAmelCase_ : Tuple ): # initialize config if "resnet-50" in model_name: __lowerCAmelCase = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: __lowerCAmelCase = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) __lowerCAmelCase = DetrConfig(use_timm_backbone=lowerCAmelCase_, backbone_config=lowerCAmelCase_ ) # set label attributes __lowerCAmelCase = 'panoptic' in model_name if is_panoptic: __lowerCAmelCase = 250 else: __lowerCAmelCase = 91 __lowerCAmelCase = 'huggingface/label-files' __lowerCAmelCase = 'coco-detection-id2label.json' __lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) __lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config, is_panoptic def a_ ( lowerCAmelCase_ : List[str] ): # here we list all keys to be renamed (original name on the left, our name on the right) __lowerCAmelCase = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", 
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", 
F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : Tuple ): __lowerCAmelCase = state_dict.pop(lowerCAmelCase_ ) __lowerCAmelCase = val def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : int=False ): __lowerCAmelCase = '' if is_panoptic: __lowerCAmelCase = 'detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) __lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:256, :] __lowerCAmelCase = in_proj_bias[:256] __lowerCAmelCase = in_proj_weight[256:512, :] __lowerCAmelCase = in_proj_bias[256:512] __lowerCAmelCase = in_proj_weight[-256:, :] __lowerCAmelCase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention __lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) __lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:256, :] __lowerCAmelCase = in_proj_bias[:256] __lowerCAmelCase = in_proj_weight[256:512, :] __lowerCAmelCase = in_proj_bias[256:512] __lowerCAmelCase = in_proj_weight[-256:, :] __lowerCAmelCase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention __lowerCAmelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) __lowerCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict __lowerCAmelCase = in_proj_weight_cross_attn[:256, :] __lowerCAmelCase = in_proj_bias_cross_attn[:256] __lowerCAmelCase = in_proj_weight_cross_attn[256:512, :] __lowerCAmelCase = in_proj_bias_cross_attn[256:512] __lowerCAmelCase = in_proj_weight_cross_attn[-256:, :] __lowerCAmelCase = in_proj_bias_cross_attn[-256:] def a_ ( ): __lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : Any=None, lowerCAmelCase_ : str=False ): __lowerCAmelCase , __lowerCAmelCase = get_detr_config(lowerCAmelCase_ ) # load original model from torch hub __lowerCAmelCase = { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(F"""Converting model {model_name}...""" ) __lowerCAmelCase = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=lowerCAmelCase_ ).eval() __lowerCAmelCase = detr.state_dict() # rename keys for src, dest in create_rename_keys(lowerCAmelCase_ ): if is_panoptic: __lowerCAmelCase = 'detr.' + src rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCAmelCase_, is_panoptic=lowerCAmelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __lowerCAmelCase = 'detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): __lowerCAmelCase = state_dict.pop(lowerCAmelCase_ ) __lowerCAmelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: __lowerCAmelCase = state_dict.pop(lowerCAmelCase_ ) __lowerCAmelCase = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: __lowerCAmelCase = state_dict.pop(lowerCAmelCase_ ) __lowerCAmelCase = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): __lowerCAmelCase = state_dict.pop(lowerCAmelCase_ ) __lowerCAmelCase = val # finally, create HuggingFace model and load state dict __lowerCAmelCase = DetrForSegmentation(lowerCAmelCase_ ) if is_panoptic else DetrForObjectDetection(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) model.eval() # verify our conversion on an image __lowerCAmelCase = 'coco_panoptic' if is_panoptic else 'coco_detection' __lowerCAmelCase = DetrImageProcessor(format=lowerCAmelCase_ ) __lowerCAmelCase = processor(images=prepare_img(), return_tensors='pt' ) __lowerCAmelCase = encoding['pixel_values'] __lowerCAmelCase = detr(lowerCAmelCase_ ) __lowerCAmelCase = model(lowerCAmelCase_ ) assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1E-3 ) assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(F"""nielsr/{model_name}""" ) processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') _snake_case : Any = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
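A hedged sketch of invoking the conversion entry point directly rather than through argparse; the dump folder path is hypothetical, and arguments are passed positionally as in the `__main__` block below:

convert_detr_checkpoint("detr-resnet-50", "converted/detr-resnet-50", False)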
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
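# A minimal reference implementation of the contiguous split the tests above expect
# (an illustrative sketch reverse-engineered from the expected values, not datasets'
# actual _distribute_shards): use min(num_shards, max_num_jobs) jobs, and give the
# first (num_shards % num_jobs) jobs one extra shard each.
def distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    if num_shards == 0:
        return []
    num_jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)
        out.append(range(start, start + size))
        start += size
    return out


assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]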
def harmonic_series(n_term: str) -> list:
    """Generate the terms of the Harmonic Series, "1", "1/2", ..., "1/n", as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
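# Illustrative doctest-style examples for harmonic_series above:
# >>> harmonic_series("5")
# ['1', '1/2', '1/3', '1/4', '1/5']
# >>> harmonic_series("")
# []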
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
    title={Evaluating Large Language Models Trained on Code},
    author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan
        and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards
        and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray
        and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf
        and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray
        and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser
        and Mohammad Bavarian and Clemens Winter and Philippe Tillet
        and Felipe Petroski Such and Dave Cummings and Matthias Plappert
        and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss
        and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak
        and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain
        and William Saunders and Christopher Hesse and Andrew N. Carr
        and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa
        and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati
        and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei
        and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
    year={2021},
    eprint={2107.03374},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed per candidate program (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
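# Worked example of the unbiased pass@k estimator above (illustrative):
# with n = 2 generated samples and c = 1 correct, pass@1 = 1 - (1 - 1/2) = 0.5,
# and pass@2 = 1.0 because n - c = 1 < k = 2 (any 2 of the 2 samples must include
# the correct one). This matches the docstring example, where one of the two "add"
# candidates passes its test:
# >>> estimate_pass_at_k(np.array([2]), np.array([1]), 1)
# array([0.5])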
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets UpperCAmelCase__ : int = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' UpperCAmelCase__ : List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' UpperCAmelCase__ : Union[str, Any] = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = False ,): if label_map is not None: for old_id, new_id in label_map.items(): SCREAMING_SNAKE_CASE__ : Dict = new_id # turn into Numpy arrays SCREAMING_SNAKE_CASE__ : Dict = np.array(_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = np.array(_snake_case ) if reduce_labels: SCREAMING_SNAKE_CASE__ : Dict = 255 SCREAMING_SNAKE_CASE__ : Dict = label - 1 SCREAMING_SNAKE_CASE__ : int = 255 SCREAMING_SNAKE_CASE__ : Optional[int] = label != ignore_index SCREAMING_SNAKE_CASE__ : Dict = np.not_equal(_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[int] = pred_label[mask] SCREAMING_SNAKE_CASE__ : Any = np.array(_snake_case )[mask] SCREAMING_SNAKE_CASE__ : Tuple = pred_label[pred_label == label] SCREAMING_SNAKE_CASE__ : str = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : Any = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : List[str] = np.histogram(_snake_case ,bins=_snake_case ,range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((num_labels,) ,dtype=np.floataa ) for result, gt_seg_map in zip(_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = intersect_and_union( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = False ,): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = total_intersect_and_union( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) # compute metrics SCREAMING_SNAKE_CASE__ : Optional[Any] = {} SCREAMING_SNAKE_CASE__ : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum() SCREAMING_SNAKE_CASE__ : Dict = total_area_intersect / total_area_union SCREAMING_SNAKE_CASE__ : Optional[Any] = total_area_intersect / total_area_label SCREAMING_SNAKE_CASE__ : int = np.nanmean(_snake_case ) SCREAMING_SNAKE_CASE__ : Dict = np.nanmean(_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = all_acc SCREAMING_SNAKE_CASE__ : List[Any] = iou SCREAMING_SNAKE_CASE__ : Tuple = acc if nan_to_num is not None: SCREAMING_SNAKE_CASE__ : Any = {metric: 
np.nan_to_num(_snake_case ,nan=_snake_case ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ (datasets.Metric ): """simple docstring""" def __magic_name__ (self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ), } ) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = mean_iou( results=SCREAMING_SNAKE_CASE__ , gt_seg_maps=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , ignore_index=SCREAMING_SNAKE_CASE__ , nan_to_num=SCREAMING_SNAKE_CASE__ , label_map=SCREAMING_SNAKE_CASE__ , reduce_labels=SCREAMING_SNAKE_CASE__ , ) return iou_result
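# Minimal usage sketch for mean_iou above (illustrative; numbers verified by hand):
# pred = [np.array([[1, 2], [3, 4]])]
# gt = [np.array([[1, 2], [3, 3]])]
# out = mean_iou(results=pred, gt_seg_maps=gt, num_labels=5, ignore_index=255)
# 3 of the 4 pixels match, so out["overall_accuracy"] == 0.75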
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (Binary Indexed Tree) supporting point updates and prefix-sum queries."""

    def __init__(self, arr: list = None, size: int = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        """Reconstruct the original array from the internal tree representation."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int):
        """Add value to arr[index]."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int):
        """Set arr[index] to value."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of arr[0:right] (right exclusive)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum of arr[left:right]."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Return the largest index i such that prefix(i + 1) <= value, or -1 if none exists."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
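# Usage sketch for FenwickTree above (illustrative; values traced by hand):
f = FenwickTree([1, 2, 3, 4, 5])
assert f.prefix(3) == 6       # arr[0] + arr[1] + arr[2]
assert f.query(1, 4) == 9     # arr[1] + arr[2] + arr[3]
f.add(2, 10)                  # arr[2]: 3 -> 13
assert f.get(2) == 13
assert f.rank_query(5) == 1   # prefix(2) = 3 <= 5, but prefix(3) = 16 > 5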
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the model documentation table of content: removes duplicate entries and sorts models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modality subsections and clean each of them
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
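# For reference, the script above expects `docs/source/en/_toctree.yml` to be shaped
# roughly like this (illustrative excerpt inferred from the traversal logic, not the
# real file):
#
# - title: API
#   sections:
#   - title: Models
#     sections:
#     - title: Text models
#       sections:
#       - local: model_doc/bert
#         title: BERT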