Dataset schema (per-row fields; string columns report length ranges, integer columns report value ranges):

| column                  | type   | min | max   |
|-------------------------|--------|-----|-------|
| code                    | string | 86  | 54.5k |
| code_codestyle          | int64  | 0   | 371   |
| style_context           | string | 87  | 49.2k |
| style_context_codestyle | int64  | 0   | 349   |
| label                   | int64  | 0   | 1     |
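The same schema can be written down programmatically. A minimal sketch using the `datasets` library; the feature names and types mirror the table above, and the one example row is invented for illustration:

```python
import datasets

# Feature spec mirroring the five columns in the table above.
features = datasets.Features(
    {
        "code": datasets.Value("string"),
        "code_codestyle": datasets.Value("int64"),
        "style_context": datasets.Value("string"),
        "style_context_codestyle": datasets.Value("int64"),
        "label": datasets.Value("int64"),
    }
)

# Build a one-row in-memory dataset with the declared schema (values are dummies).
ds = datasets.Dataset.from_dict(
    {
        "code": ["print('hello')"],
        "code_codestyle": [350],
        "style_context": ["print('world')"],
        "style_context_codestyle": [295],
        "label": [0],
    },
    features=features,
)
print(ds.features)
```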
Row 1 · code:

```python
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of "
                "`sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # sacrebleu requires the same number of references for every prediction
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # transpose [prediction][reference] -> [reference][prediction] to match sacrebleu's input format
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
```
code_codestyle: 350
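The returned score follows the formula stated in the docstring above, `num_edits / sum_ref_lengths * 100`; Example 1 can be checked by hand:

```python
# Example 1 above: 15 edits against a reference length of 10.0
num_edits = 15
ref_length = 10.0
score = num_edits / ref_length * 100
print(score)  # 150.0, matching {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
```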
Row 1 · style_context:

```python
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
```
style_context_codestyle: 295
label: 0
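The FiLM conditioning used throughout the `style_context` above boils down to one affine transform, `x * (1 + scale) + shift`, with scale and shift predicted from a conditioning embedding. A minimal self-contained sketch of the same idea; tensor shapes here are illustrative, not taken from the model:

```python
import torch
from torch import nn


class FiLM(nn.Module):
    """Predict a per-feature scale and shift from a conditioning vector."""

    def __init__(self, cond_dim: int, num_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_dim, num_features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift


x = torch.randn(2, 16, 8)     # (batch, seq, features)
cond = torch.randn(2, 1, 32)  # (batch, 1, cond_dim), broadcast over seq
print(FiLM(32, 8)(x, cond).shape)  # torch.Size([2, 16, 8])
```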
Row 2 · code:

```python
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
```
code_codestyle: 351
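The pervasive `.T` in the conversion above is not cosmetic: Flax/T5X stores dense kernels as `(in_features, out_features)`, while `torch.nn.Linear.weight` is `(out_features, in_features)`. A small check of the convention; the shapes are illustrative:

```python
import numpy as np
import torch

in_features, out_features = 512, 1024
flax_kernel = np.random.randn(in_features, out_features).astype(np.float32)

# PyTorch expects the transposed layout, hence `.T` during conversion.
linear = torch.nn.Linear(in_features, out_features, bias=False)
linear.weight.data = torch.from_numpy(flax_kernel.T.copy())

x = np.random.randn(3, in_features).astype(np.float32)
ref = x @ flax_kernel                               # Flax convention: y = x @ W
out = linear(torch.from_numpy(x)).detach().numpy()  # PyTorch convention: y = x @ W.T
print(np.allclose(ref, out, atol=1e-5))  # True
```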
Row 2 · style_context:

```python
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation to compute the largest divisor of the sequence length that is at most the window size,
    using only torch ops so that the graph can be exported to ONNX.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
```
style_context_codestyle: 295
label: 0
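`expand_attention_types_params` above simply unrolls the compact `attention_types` spec into one entry per layer; the default `[[["global", "local"], 12]]` expands to 24 alternating entries, matching `num_layers=24`:

```python
def expand_attention_types_params(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):       # repeat the pattern item[1] times
            attentions.extend(item[0])  # item[0] is the per-repeat pattern
    return attentions

layers = expand_attention_types_params([[["global", "local"], 12]])
print(len(layers))  # 24
print(layers[:4])   # ['global', 'local', 'global', 'local']
```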
Row 3 · code:

```python
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
```
code_codestyle: 352
Row 3 · style_context:

```python
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting the number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
```
style_context_codestyle: 295
label: 0
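The pixel loop above is correct but slow for large images; since the operation is just `255 - value` per channel, a vectorized NumPy version gives the same result. A sketch, assuming the standard `uint8` array that `cv2.imread` returns:

```python
import numpy as np


def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    # 255 - value, applied to every channel of every pixel at once
    return 255 - img


img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
assert (convert_to_negative_vectorized(img) == 255 - img).all()
```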
Row 4 · code:

```python
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
```
code_codestyle: 353
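Much of the test above exercises config save/load round-tripping; the same pattern can be tried directly with the public diffusers API, as a sketch:

```python
import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

# Save the config to a temporary directory and restore a fresh scheduler from it.
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    restored = IPNDMScheduler.from_pretrained(tmpdirname)

restored.set_timesteps(50)
print(torch.equal(scheduler.timesteps, restored.timesteps))  # True
```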
Row 4 · style_context:

```python
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
```
style_context_codestyle: 295
label: 0
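The `val[:dim, :]`, `val[dim : dim * 2, :]`, `val[-dim:, :]` slicing in `convert_state_dict` above splits a fused qkv projection into separate query, key, and value weights; the same trick in isolation:

```python
import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)  # fused (3*dim, dim) projection

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]

# Concatenating the three slices recovers the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)
```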
Row 5 · code:

```python
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
```
code_codestyle: 354
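In practice this builder is reached through `load_dataset`; a minimal sketch, with a hypothetical directory layout:

```python
from datasets import load_dataset

# Hypothetical directory layout:
#   my_audio/train/cat/1.wav
#   my_audio/train/dog/2.wav
# Folder names become the `label` feature; files load as the `audio` feature.
ds = load_dataset("audiofolder", data_dir="my_audio")
print(ds["train"].features)
```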
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction`,
    a `3x3` convolution follows, and a final `1x1` convolution remaps the result to the output size.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
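For reference, a minimal inference sketch using the standard transformers API (the checkpoint name is one public example; `image` is a PIL image you supply):

import torch
from transformers import AutoImageProcessor, ResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])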
295
0
"""simple docstring""" from __future__ import annotations def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int = None , UpperCamelCase__ : Any = None ) -> Tuple: '''simple docstring''' if start is None: _snake_case = 0 if end is None: _snake_case = len(UpperCamelCase__ ) - 1 if start >= end: return _snake_case = (start + end) // 2 slowsort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) slowsort(UpperCamelCase__ , mid + 1 , UpperCamelCase__ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case = sequence[mid], sequence[end] slowsort(UpperCamelCase__ , UpperCamelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
355
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, meaning the vertex has not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise return an empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
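An illustrative run on a small assumed adjacency matrix; vertices are tried in increasing order, so this instance returns [0, 1, 2, 4, 3, 0]:

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]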
295
0
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order ("KDPM2" / DPM-Solver-2 style) discrete scheduler inspired by k-diffusion."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace',"
                " 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
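For context, a minimal denoising-loop sketch showing how a scheduler like this is driven; `unet` is assumed to be a trained diffusers `UNet2DModel` and the tensor shape is illustrative:

scheduler.set_timesteps(num_inference_steps=25)
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)  # sigma-scale the current sample
    noise_pred = unet(model_input, t).sample              # predicted noise (epsilon)
    sample = scheduler.step(noise_pred, t, sample).prev_sample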
356
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
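After conversion, the saved directory can be loaded back with the standard diffusers API; a minimal sketch (the directory path is a hypothetical example of `output_path`):

from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./ldm-converted")  # hypothetical output_path
image = pipe(num_inference_steps=50).images[0]
image.save("sample.png")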
295
0
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
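Note that the function above swaps two uniformly random indices per iteration; the textbook Fisher-Yates (Durstenfeld) variant instead walks from the end and is guaranteed to produce a uniform permutation. A minimal sketch for comparison:

def fisher_yates_shuffle_classic(data: list) -> list:
    # Durstenfeld variant: swap each position with a uniformly
    # chosen index at or before it.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data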
357
import unittest
from typing import Dict, Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self) -> Tuple[Dict[str, int], Dict[str, torch.Tensor]]:
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
295
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
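A minimal configuration sketch following the standard transformers composite-config pattern (the sub-config overrides are illustrative):

text_config = BlipTextConfig(num_hidden_layers=6)
vision_config = BlipVisionConfig()
config = BlipConfig.from_text_vision_configs(text_config, vision_config)
# the text tower's cross-attention width is tied to the vision hidden size
assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size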
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
295
0
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Returns the sum of the proper divisors of `n` (divisors excluding `n` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Returns the sum of all amicable numbers below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
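An illustrative sanity check using the classic amicable pair (220, 284); 31626 is the well-known Project Euler 21 result for the default limit:

assert sum_of_divisors(220) == 284  # proper divisors of 220 sum to 284
assert sum_of_divisors(284) == 220  # and vice versa
print(solution(10000))  # 31626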
359
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: returns the maximum sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
295
0
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
360
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give Transformer-XL and XLNet more state before short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
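A minimal end-to-end usage sketch via the high-level factory (standard transformers API; the checkpoint is one public example):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
result = generator("Hello, I'm a language model,", max_new_tokens=20)
print(result[0]["generated_text"])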
295
0
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder UpperCAmelCase_ = "base_with_context" def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ) -> int: '''simple docstring''' _snake_case = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[F'''layers_{lyr_num}'''] _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _snake_case = ly_weight['''attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' _snake_case = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[F'''layers_{lyr_num}'''] _snake_case = ly_weight['''attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _snake_case = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=UpperCamelCase__ ) _snake_case = nn.Parameter( 
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _snake_case = weights[F'''layers_{lyr_num}'''] _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) _snake_case = ly_weight['''self_attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = ly_weight['''MultiHeadDotProductAttention_0'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Tuple: '''simple docstring''' _snake_case = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _snake_case = jnp.tree_util.tree_map(onp.array , UpperCamelCase__ ) _snake_case = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _snake_case = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) _snake_case = inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ ) _snake_case = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) _snake_case = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _snake_case = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) _snake_case = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _snake_case = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , UpperCamelCase__ ) _snake_case = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , UpperCamelCase__ ) _snake_case = load_decoder(ta_checkpoint['target']['decoder'] , UpperCamelCase__ ) _snake_case = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) _snake_case = SpectrogramDiffusionPipeline( notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F"{MODEL}/checkpoint_500000", type=str, required=False, help="""Path to the original jax model checkpoint.""", ) UpperCAmelCase_ = parser.parse_args() main(args)
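A note on the recurring `.T` transposes in the loaders above: Flax stores a `Dense` kernel as `(in_features, out_features)`, while `torch.nn.Linear` stores its weight as `(out_features, in_features)`, so every ported kernel needs a transpose. A minimal, self-contained sketch of that convention mismatch (shapes and names are illustrative, not taken from the checkpoint):

```python
# Why the `.T` appears when porting Flax dense kernels to torch.nn.Linear.
import numpy as np
import torch
import torch.nn as nn

in_features, out_features = 4, 3
flax_kernel = np.random.randn(in_features, out_features).astype(np.float32)

linear = nn.Linear(in_features, out_features, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(flax_kernel.T))  # transpose on import

x = np.random.randn(2, in_features).astype(np.float32)
expected = x @ flax_kernel  # Flax convention: y = x @ kernel
actual = linear(torch.from_numpy(x)).detach().numpy()
assert np.allclose(expected, actual, atol=1e-6)
```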
361
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
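Underneath the `datasets` plumbing, the metric above is a direct pass-through to NLTK's `corpus_gleu`. A minimal sketch reproducing Example 1 from the docstring without loading the metric:

```python
# Same computation as the metric's compute(), called on NLTK directly.
from nltk.translate import gleu_score

hyp1 = "It is a guide to action which ensures that the rubber duck always disobeys the commands of the cat".split()
ref1a = "It is the guiding principle which guarantees the rubber duck forces never being under the command of the cat".split()
hyp2 = "he read the book because he was interested in world history".split()
ref2a = "he was interested in world history because he read the book".split()

score = gleu_score.corpus_gleu(
    list_of_references=[[ref1a], [ref2a]],
    hypotheses=[hyp1, hyp2],
    min_len=1,
    max_len=4,
)
print(round(score, 2))  # 0.44, matching Example 1 above
```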
295
0
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''char''' lowerCAmelCase_ = '''bpe''' lowerCAmelCase_ = '''wp''' UpperCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ['''image_processor''', '''char_tokenizer'''] lowerCAmelCase_ = '''ViTImageProcessor''' lowerCAmelCase_ = '''MgpstrTokenizer''' def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Optional[int]: _snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __a , ) _snake_case = kwargs.pop('feature_extractor' ) _snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) _snake_case = tokenizer _snake_case = AutoTokenizer.from_pretrained('gpt2' ) _snake_case = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(__a , __a ) def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> int: if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: _snake_case = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None: _snake_case = self.char_tokenizer(__a , return_tensors=__a , **__a ) if text is None: return inputs elif images is None: return encodings else: _snake_case = encodings['input_ids'] return inputs def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case , _snake_case , _snake_case = sequences _snake_case = char_preds.size(0 ) _snake_case , _snake_case = self._decode_helper(__a , 'char' ) _snake_case , _snake_case = self._decode_helper(__a , 'bpe' ) _snake_case , _snake_case = self._decode_helper(__a , 'wp' ) _snake_case = [] _snake_case = [] for i in range(__a ): _snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] _snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] _snake_case = scores.index(max(__a ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _snake_case = {} _snake_case = final_strs _snake_case = final_scores _snake_case = char_strs _snake_case = bpe_strs _snake_case = wp_strs return out def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: if format == DecodeType.CHARACTER: _snake_case = self.char_decode _snake_case = 1 _snake_case = '[s]' elif format == DecodeType.BPE: _snake_case = self.bpe_decode _snake_case = 2 _snake_case = '#' elif format == DecodeType.WORDPIECE: _snake_case = self.wp_decode _snake_case = 102 _snake_case = '[SEP]' else: raise ValueError(F'''Format {format} is not supported.''' ) _snake_case , _snake_case = [], [] _snake_case = pred_logits.size(0 ) _snake_case = pred_logits.size(1 ) _snake_case , _snake_case = pred_logits.topk(1 , dim=-1 , largest=__a , sorted=__a ) _snake_case = preds_index.view(-1 , __a )[:, 1:] _snake_case = decoder(__a ) _snake_case , _snake_case = torch.nn.functional.softmax(__a , dim=2 
).max(dim=2 ) _snake_case = preds_max_prob[:, 1:] for index in range(__a ): _snake_case = preds_str[index].find(__a ) _snake_case = preds_str[index][:pred_eos] _snake_case = preds_index[index].cpu().tolist() _snake_case = pred_index.index(__a ) if eos_token in pred_index else -1 _snake_case = preds_max_prob[index][: pred_eos_index + 1] _snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__a ) conf_scores.append(__a ) return dec_strs, conf_scores def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _snake_case = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(__a )] return decode_strs def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: return self.bpe_tokenizer.batch_decode(__a ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(__a )] return decode_strs
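The core of `batch_decode` above is a per-sample selection among the character, BPE, and wordpiece heads: whichever head decoded with the highest cumulative confidence wins. A self-contained sketch of that selection step, with invented strings and scores:

```python
# Best-of-three head selection, mirroring the loop in batch_decode.
# All strings and confidences below are made up for illustration.
char_strs, char_scores = ["hello", "wrld"], [0.91, 0.40]
bpe_strs, bpe_scores = ["helo", "world"], [0.75, 0.88]
wp_strs, wp_scores = ["hell0", "word"], [0.60, 0.52]

final_strs, final_scores = [], []
for i in range(2):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    best = scores.index(max(scores))  # keep the most confident head
    final_strs.append(strs[best])
    final_scores.append(scores[best])

print(final_strs)    # ['hello', 'world']
print(final_scores)  # [0.91, 0.88]
```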
362
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
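For intuition about the `bpe` method above: it repeatedly finds the lowest-ranked adjacent pair and merges it, until no known merge applies. A standalone sketch of that loop with a hypothetical two-entry merge table:

```python
# One byte-level BPE merge loop, mirroring get_pairs/bpe above.
def get_pairs(word):
    pairs = set()
    prev = word[0]
    for ch in word[1:]:
        pairs.add((prev, ch))
        prev = ch
    return pairs

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical merges, best rank first

word = ("l", "o", "w", "e", "r")
while True:
    pairs = get_pairs(word)
    if not pairs:
        break
    bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

print(word)  # ('low', 'e', 'r')
```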
295
0
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ) -> List[Any]: '''simple docstring''' _snake_case = 0 if start < end: _snake_case = randint(UpperCAmelCase__ , UpperCAmelCase__ ) _snake_case = a[end] _snake_case = a[pivot] _snake_case = temp _snake_case , _snake_case = _in_place_partition(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) count += _in_place_quick_sort(UpperCAmelCase__ , UpperCAmelCase__ , p - 1 ) count += _in_place_quick_sort(UpperCAmelCase__ , p + 1 , UpperCAmelCase__ ) return count def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> List[str]: '''simple docstring''' _snake_case = 0 _snake_case = randint(UpperCAmelCase__ , UpperCAmelCase__ ) _snake_case = a[end] _snake_case = a[pivot] _snake_case = temp _snake_case = start - 1 for index in range(UpperCAmelCase__ , UpperCAmelCase__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case = new_pivot_index + 1 _snake_case = a[new_pivot_index] _snake_case = a[index] _snake_case = temp _snake_case = a[new_pivot_index + 1] _snake_case = a[end] _snake_case = temp return new_pivot_index + 1, count UpperCAmelCase_ = TemporaryFile() UpperCAmelCase_ = 100 # 1000 elements are to be sorted UpperCAmelCase_ , UpperCAmelCase_ = 0, 1 # mean and standard deviation UpperCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array UpperCAmelCase_ = np.load(outfile) UpperCAmelCase_ = len(M) - 1 UpperCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" """is :""" ) print(z)
363
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase_ = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: _snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) else: _snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) _snake_case = ['key_proj', 'value_proj', 'query_proj'] _snake_case = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: _snake_case = key.split('.' ) if attributes[0] == "lm_head": _snake_case = prophet _snake_case = prophet_old else: _snake_case = prophet.prophetnet _snake_case = prophet_old.model _snake_case = False for attribute in attributes: if attribute in mapping: _snake_case = mapping[attribute] if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0: _snake_case = attribute elif hasattr(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _snake_case = old_model.weight logger.info(F'''{attribute} is initialized.''' ) _snake_case = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_snake_case = old_model.bias logger.info(F'''{attribute} is initialized''' ) _snake_case = True break elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ): _snake_case = old_model.in_proj_weight.shape[0] // 3 _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _snake_case = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." _snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] ) _snake_case = True break if attribute.isdigit(): _snake_case = model[int(UpperCamelCase__ )] _snake_case = old_model[int(UpperCamelCase__ )] else: _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if old_attribute == "": _snake_case = old_model else: if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
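The `query_proj`/`key_proj`/`value_proj` branch above slices a fused `in_proj_weight` into three separate projections. A minimal sketch of that split with toy shapes (the sizes are illustrative, not ProphetNet's real dimensions):

```python
# Splitting a fused [q; k; v] projection into three nn.Linear modules.
import torch
import torch.nn as nn

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # stacked along dim 0
in_proj_bias = torch.randn(3 * embed_dim)

query_proj = nn.Linear(embed_dim, embed_dim)
key_proj = nn.Linear(embed_dim, embed_dim)
value_proj = nn.Linear(embed_dim, embed_dim)

with torch.no_grad():
    query_proj.weight.copy_(in_proj_weight[:embed_dim, :])
    query_proj.bias.copy_(in_proj_bias[:embed_dim])
    key_proj.weight.copy_(in_proj_weight[embed_dim : 2 * embed_dim, :])
    key_proj.bias.copy_(in_proj_bias[embed_dim : 2 * embed_dim])
    value_proj.weight.copy_(in_proj_weight[2 * embed_dim :, :])
    value_proj.bias.copy_(in_proj_bias[2 * embed_dim :])

# The query slice of the fused projection matches the split module.
x = torch.randn(2, embed_dim)
fused = x @ in_proj_weight.t() + in_proj_bias
assert torch.allclose(fused[:, :embed_dim], query_proj(x), atol=1e-6)
```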
295
0
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """
        Bernstein basis polynomials evaluated at t.

        >>> curve = BezierCurve([(1, 1), (1, 2)])
        >>> curve.basis_function(0)
        [1.0, 0.0]
        >>> curve.basis_function(1)
        [0.0, 1.0]
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """
        The curve point reached at time t.

        >>> curve = BezierCurve([(1, 1), (1, 2)])
        >>> curve.bezier_curve_function(0)
        (1.0, 1.0)
        >>> curve.bezier_curve_function(1)
        (1.0, 2.0)
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of the i-th basis function and the i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
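A quick numeric check of the evaluation above: for the degree-1 curve through (1, 2) and (3, 5), t = 0.5 gives the segment midpoint.

```python
curve = BezierCurve([(1, 2), (3, 5)])
print(curve.basis_function(0.5))         # [0.5, 0.5]
print(curve.bezier_curve_function(0.5))  # (2.0, 3.5)
```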
364
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph with the given edge probability.

    >>> random.seed(1)
    >>> random_graph(4, 0.5)
    {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is smaller than the probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete (fully connected) graph with vertices_number vertices.

    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
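Usage sketch: seeding the RNG makes the output reproducible, and with the same seed the directed variant keeps only the i -> j direction of each sampled pair.

```python
import random

random.seed(1)
print(random_graph(4, 0.5))
# {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
random.seed(1)
print(random_graph(4, 0.5, directed=True))
# {0: [1], 1: [2, 3], 2: [3], 3: []}
```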
295
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class UpperCamelCase_ ( a__ ): lowerCAmelCase_ = "data2vec-vision" def __init__( self , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3072 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=224 , lowerCAmelCase_=16 , lowerCAmelCase_=3 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=True , lowerCAmelCase_=[3, 5, 7, 11] , lowerCAmelCase_=[1, 2, 3, 6] , lowerCAmelCase_=True , lowerCAmelCase_=0.4 , lowerCAmelCase_=256 , lowerCAmelCase_=1 , lowerCAmelCase_=False , lowerCAmelCase_=255 , **lowerCAmelCase_ , ) -> Optional[int]: super().__init__(**SCREAMING_SNAKE_CASE_ ) _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = use_mask_token _snake_case = use_absolute_position_embeddings _snake_case = use_relative_position_bias _snake_case = use_shared_relative_position_bias _snake_case = layer_scale_init_value _snake_case = drop_path_rate _snake_case = use_mean_pooling # decode head attributes (semantic segmentation) _snake_case = out_indices _snake_case = pool_scales # auxiliary head attributes (semantic segmentation) _snake_case = use_auxiliary_head _snake_case = auxiliary_loss_weight _snake_case = auxiliary_channels _snake_case = auxiliary_num_convs _snake_case = auxiliary_concat_input _snake_case = semantic_loss_ignore_index class UpperCamelCase_ ( a__ ): lowerCAmelCase_ = version.parse('''1.11''' ) @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCAmelCase ( self ) -> float: return 1E-4
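A usage sketch, assuming the upstream `transformers` class this configuration mirrors (`Data2VecVisionConfig`): the config is a plain hyperparameter container, so overriding a field at construction time is all there is to it.

```python
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=384)
print(config.model_type)   # "data2vec-vision"
print(config.image_size)   # 384
print(config.hidden_size)  # 768 (the default)
```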
365
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = encoder_stride _snake_case = num_attention_outputs _snake_case = embed_dim _snake_case = embed_dim + 1 _snake_case = resolution _snake_case = depths _snake_case = hidden_sizes _snake_case = dim _snake_case = mlp_expansion_ratio def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> Tuple: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _snake_case = self.type_sequence_label_size _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Optional[Any]: pass def lowerCAmelCase ( self ) -> str: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): _snake_case = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: _snake_case = seq_length * self.model_tester.chunk_length else: _snake_case = self.model_tester.seq_length 
self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _snake_case = outputs.decoder_hidden_states self.asseretIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): _snake_case = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self ) -> Dict: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _snake_case = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _snake_case = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _snake_case = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Dict: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
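Outside the unittest harness, the slow integration tests above boil down to a few lines. A hedged sketch (it downloads the public `snap-research/efficientformer-l1-300` checkpoint, so it needs network access):

```python
import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs, training=False)
print(outputs.logits.shape)  # (1, 1000)
print(int(tf.math.argmax(outputs.logits, axis=-1)[0]))  # predicted ImageNet class id
```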
295
0
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = (1 - _cos) / 2 _snake_case = 1 - _cos _snake_case = 1 + alpha _snake_case = -2 * _cos _snake_case = 1 - alpha _snake_case = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = (1 + _cos) / 2 _snake_case = -1 - _cos _snake_case = 1 + alpha _snake_case = -2 * _cos _snake_case = 1 - alpha _snake_case = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = _sin / 2 _snake_case = 0 _snake_case = -ba _snake_case = 1 + alpha _snake_case = -2 * _cos _snake_case = 1 - alpha _snake_case = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = 1 - alpha _snake_case = -2 * _cos _snake_case = 1 + alpha _snake_case = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = 10 ** (gain_db / 40) _snake_case = 1 + alpha * big_a _snake_case = -2 * _cos _snake_case = 1 - alpha * big_a _snake_case = 1 + alpha / big_a _snake_case = -2 * _cos _snake_case = 1 - alpha / big_a _snake_case = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = 10 ** (gain_db / 40) _snake_case = (big_a + 1) - (big_a - 1) * _cos _snake_case = (big_a + 1) + (big_a - 1) * _cos _snake_case = (big_a - 1) - (big_a + 1) * _cos _snake_case = (big_a - 1) + (big_a + 1) * _cos _snake_case = 2 * sqrt(lowerCAmelCase__ ) * alpha _snake_case = big_a * (pmc + aaa) _snake_case = 2 * big_a * mpc _snake_case = big_a * (pmc - aaa) _snake_case = ppmc + aaa _snake_case = -2 * pmpc _snake_case = ppmc - aaa _snake_case = 
IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter: '''simple docstring''' _snake_case = tau * frequency / samplerate _snake_case = sin(lowerCAmelCase__ ) _snake_case = cos(lowerCAmelCase__ ) _snake_case = _sin / (2 * q_factor) _snake_case = 10 ** (gain_db / 40) _snake_case = (big_a + 1) - (big_a - 1) * _cos _snake_case = (big_a + 1) + (big_a - 1) * _cos _snake_case = (big_a - 1) - (big_a + 1) * _cos _snake_case = (big_a - 1) + (big_a + 1) * _cos _snake_case = 2 * sqrt(lowerCAmelCase__ ) * alpha _snake_case = big_a * (ppmc + aaa) _snake_case = -2 * big_a * pmpc _snake_case = big_a * (ppmc - aaa) _snake_case = pmc + aaa _snake_case = 2 * mpc _snake_case = pmc - aaa _snake_case = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
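All of these factory functions emit coefficients for the same second-order (biquad) section. A self-contained sketch that recomputes the low-pass coefficients from the formulas above and runs a direct-form-I filter, without depending on the `audio_filters` package (`biquad` and `lowpass_coefficients` are illustrative helpers, not part of that package):

```python
from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    # Same algebra as make_lowpass above.
    w0 = tau * frequency / samplerate
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)
    b1 = 1 - _cos
    b0 = b2 = b1 / 2
    return (1 + alpha, -2 * _cos, 1 - alpha), (b0, b1, b2)

def biquad(samples, a_coeffs, b_coeffs):
    # Direct form I: y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x1, x2, y1, y2 = x0, x1, y0, y1
        out.append(y0)
    return out

# A 1 kHz low-pass at 48 kHz passes DC untouched: a unit step settles near 1.0.
a, b = lowpass_coefficients(1000, 48000)
print(round(biquad([1.0] * 2000, a, b)[-1], 3))  # ~1.0 (unit DC gain)
```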
366
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> List[str]: super().setUp() _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '<unk>'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def lowerCAmelCase ( self ) -> Union[str, Any]: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertNotIn('labels' , lowerCAmelCase_ ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , 
return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.'] _snake_case = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' ) _snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' ) _snake_case = inputs['input_ids'] _snake_case = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = ['Summary of the text.', 'Another summary.'] _snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']] _snake_case = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Tuple: pass def lowerCAmelCase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = 'A, <mask> AllenNLP sentence.' _snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
295
0
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the running best, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
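A quick sanity-check sketch for max_subsequence_sum above (the expected values follow from taking every positive element, or the single largest element when all are negative):

assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1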
367
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = BertTokenizer lowerCAmelCase_ = BertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english def lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _snake_case = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = 'UNwant\u00E9d,running' _snake_case = 'unwanted, running' return input_text, output_text def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.tokenizer_class(self.vocab_file ) _snake_case = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def lowerCAmelCase ( self ) -> Tuple: if not self.test_rust_tokenizer: return _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing _snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase ( self ) -> Any: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Dict: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer() _snake_case = 'a\n\'ll !!to?\'d of, can\'t.' 
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _snake_case = {} for i, token in enumerate(lowerCAmelCase_ ): _snake_case = i _snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase ( self ) -> Tuple: self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase ( self ) -> Dict: self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase ( self ) -> int: self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' ) _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def lowerCAmelCase ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _snake_case = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) _snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False _snake_case = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), 
tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase ( self ) -> str: _snake_case = ['的', '人', '有'] _snake_case = ''.join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = True _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". _snake_case = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
295
0
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio payload through ffmpeg into mono float32 at `sampling_rate`."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw microphone audio chunks read through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield dicts of microphone audio with overlapping strides, skipping chunks when the consumer falls behind."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Chunk raw bytes from `iterator` into pieces of `chunk_len`, overlapping by `stride` bytes.

    When `stream` is True, partial chunks are yielded (flagged with `"partial": True`)
    before enough bytes have accumulated for a full chunk.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Stream `buflen`-byte reads from an ffmpeg subprocess's stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
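A minimal sketch of how chunk_bytes_iter above splits a byte stream (the toy payload and expected chunks are illustrative, not taken from the library's test suite):

chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(0, 0)))
# Two complete 4-byte chunks with no overlap between them
assert [c["raw"] for c in chunks] == [b"abcd", b"efgh"]
assert [c["stride"] for c in chunks] == [(0, 0), (0, 0)]

With a non-zero stride such as (1, 1), consecutive chunks would overlap, and each item's "stride" field reports how many bytes of the chunk are left/right context rather than fresh data.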
368
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
295
0
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ) -> int: '''simple docstring''' return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict="attention" ) -> int: '''simple docstring''' _snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) _snake_case = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) _snake_case = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) _snake_case = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) _snake_case = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any=False ) -> List[Any]: '''simple docstring''' if split_mlp_wi: _snake_case = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] _snake_case = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] _snake_case = (wi_a, wi_a) else: _snake_case = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] _snake_case = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ) -> Optional[Any]: '''simple docstring''' return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowerCamelCase__ ( UpperCamelCase__ : int , *, UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str = False ) -> Tuple: '''simple docstring''' _snake_case = traverse_util.flatten_dict(variables['target'] ) _snake_case = {'/'.join(lowerCAmelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _snake_case = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , lowerCAmelCase_ ) _snake_case = collections.OrderedDict() # Shared embeddings. _snake_case = old['token_embedder/embedding'] # Encoder. for i in range(lowerCAmelCase_ ): # Block i, layer 0 (Self Attention). _snake_case = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_attention_layer_norm' ) _snake_case = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'attention' ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 1 (MLP). 
_snake_case = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_mlp_layer_norm' ) _snake_case = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , lowerCAmelCase_ ) _snake_case = layer_norm if split_mlp_wi: _snake_case = wi[0].T _snake_case = wi[1].T else: _snake_case = wi.T _snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer _snake_case = tax_relpos_bias_lookup( lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' ).T _snake_case = old['encoder/encoder_norm/scale'] if not scalable_attention: _snake_case = tax_relpos_bias_lookup( lowerCAmelCase_ , 0 , 'encoder' ).T _snake_case = tax_relpos_bias_lookup( lowerCAmelCase_ , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(lowerCAmelCase_ ): # Block i, layer 0 (Self Attention). _snake_case = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_self_attention_layer_norm' ) _snake_case = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'self_attention' ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 1 (Cross Attention). _snake_case = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' ) _snake_case = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'encoder_decoder_attention' ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 2 (MLP). _snake_case = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_mlp_layer_norm' ) _snake_case = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , lowerCAmelCase_ ) _snake_case = layer_norm if split_mlp_wi: _snake_case = wi[0].T _snake_case = wi[1].T else: _snake_case = wi.T _snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer _snake_case = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' ).T _snake_case = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _snake_case = old['decoder/logits_dense/kernel'].T return new def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> Any: '''simple docstring''' _snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _snake_case = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _snake_case = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) _snake_case = state_dict['shared.weight'] return state_dict def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' _snake_case = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) _snake_case = convert_tax_to_pytorch( lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ ) _snake_case = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] = False , UpperCamelCase__ : Tuple = False , ) -> List[str]: '''simple docstring''' _snake_case = MTaConfig.from_json_file(lowerCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _snake_case = UMTaEncoderModel(lowerCAmelCase_ ) else: _snake_case = UMTaForConditionalGeneration(lowerCAmelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(lowerCAmelCase_ ) # Verify that we can load the checkpoint. model.from_pretrained(lowerCAmelCase_ ) print('Done' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) UpperCAmelCase_ = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
369
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: with TemporaryDirectory() as tmp_dir: _snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) _snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ ) _snake_case = builder_cls( cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , ) _snake_case = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) _snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) self.assertTrue(os.path.exists(lowerCAmelCase_ ) ) @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' _snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' _snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _snake_case = None builder_instance.download_and_prepare() _snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _snake_case = dataset_module_factory('wikipedia' , 
cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) _snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert "train" in ds assert isinstance(ds['train'] , UpperCamelCase__ ) assert next(iter(ds['train'] ) )
295
0
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _lowercase = pytest.mark.integration _lowercase = {"""comet"""} _lowercase = importlib.util.find_spec("""fairseq""") is not None _lowercase = {"""code_eval"""} _lowercase = os.name == """nt""" _lowercase = {"""bertscore""", """frugalscore""", """perplexity"""} _lowercase = importlib.util.find_spec("""transformers""") is not None def lowerCamelCase__ ( UpperCamelCase__ : List[Any] ) -> Any: '''simple docstring''' @wraps(__SCREAMING_SNAKE_CASE ) def wrapper(self : int , UpperCamelCase__ : str ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('\"test requires Fairseq\"' ) else: test_case(self , __SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' @wraps(__SCREAMING_SNAKE_CASE ) def wrapper(self : Dict , UpperCamelCase__ : Dict ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('\"test requires transformers\"' ) else: test_case(self , __SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' @wraps(__SCREAMING_SNAKE_CASE ) def wrapper(self : List[str] , UpperCamelCase__ : List[str] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('\"test not supported on Windows\"' ) else: test_case(self , __SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase__ ( ) -> Any: '''simple docstring''' _snake_case = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __snake_case , __snake_case , __snake_case ) @local class UpperCamelCase_ ( parameterized.TestCase ): lowerCAmelCase_ = {} lowerCAmelCase_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _snake_case = "[...]" _snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , UpperCamelCase__ ) ).module_path ) _snake_case = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCamelCase__ ) # check parameters _snake_case = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCamelCase__ , metric_module.__name__ ): with self.use_local_metrics(): try: _snake_case = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _snake_case = "[...]" _snake_case = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , UpperCamelCase__ ) ).module_path ) # run 
doctest with self.use_local_metrics(): _snake_case = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase__ ): yield else: yield @contextmanager def lowerCAmelCase ( self ) -> Tuple: def load_local_metric(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ): return load_metric(os.path.join('metrics' , UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ ) with patch('datasets.load_metric' ) as mock_load_metric: _snake_case = load_local_metric yield @classmethod def lowerCAmelCase ( cls , lowerCAmelCase_ ) -> Tuple: def wrapper(lowerCAmelCase_ ): _snake_case = contextmanager(UpperCamelCase__ ) _snake_case = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> int: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class UpperCamelCase_ ( __snake_case ): def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _snake_case = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase__ ( UpperCamelCase__ : List[Any] ) -> str: '''simple docstring''' import torch def bert_cos_score_idf(UpperCamelCase__ : Tuple , UpperCamelCase__ : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Union[str, Any] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _snake_case = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> List[str]: '''simple docstring''' def load_from_checkpoint(UpperCamelCase__ : int ): class UpperCamelCase_ : def lowerCAmelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int: assert len(UpperCamelCase__ ) == 2 _snake_case = [0.19, 0.92] return scores, sum(UpperCamelCase__ ) / len(UpperCamelCase__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _snake_case = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _snake_case = load_from_checkpoint yield def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = load_metric(os.path.join('metrics' , 'seqeval' ) ) _snake_case = "ERROR" _snake_case = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=__SCREAMING_SNAKE_CASE )
370
def one_pence() -> int:
    """There is exactly one way to make any non-negative amount from 1p coins."""
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make ``x`` pence from standard British coins."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
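The mutually recursive counters above recompute many subproblems; the bottom-up sketch below (not part of the original solution, the names are mine) counts the same combinations with one pass per denomination:

def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence  # one way to make 0p: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

assert solution_dp(200) == solution(200) == 73682  # Project Euler 31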
295
0
from __future__ import annotations


def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary search for the smallest index in v[l+1 .. r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of ``v``
    in O(n log n), keeping the smallest possible tail value for each length."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep the tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
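A usage sketch for the function above; the expected length can be checked by hand (2, 3, 7, 8, 10, 13 is one longest strictly increasing subsequence):

assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert longest_increasing_subsequence_length([]) == 0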
371
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row (Pascal's triangle) using O(r) extra space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
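A quick cross-check sketch of the routine above against the standard library (math.comb is available from Python 3.8 onward):

import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252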
295
0
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=4 , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_attention_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_choices def lowerCAmelCase ( self ) -> Any: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_attention_mask: _snake_case = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCAmelCase_ , ) return config, input_ids, attention_mask def lowerCAmelCase ( self ) -> Dict: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self ) -> str: _snake_case = FlaxDistilBertModelTester(self ) @slow def lowerCAmelCase ( self ) -> Any: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained('distilbert-base-uncased' ) _snake_case = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase_ ) @require_flax class UpperCamelCase_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self ) -> int: _snake_case = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) _snake_case = np.array([[0, 345, 
232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] _snake_case = (1, 11, 768) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) )
350
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]: super().__init__() _snake_case = nn.Sequential( nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , ) _snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.ModuleList() for lyr_num in range(lowerCAmelCase_ ): # FiLM conditional T5 decoder _snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) self.decoders.append(lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case , _snake_case , _snake_case = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _snake_case = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _snake_case = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _snake_case = torch.broadcast_to( torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _snake_case = self.position_encoding(lowerCAmelCase_ ) _snake_case = self.continuous_inputs_projection(lowerCAmelCase_ ) inputs += position_encodings _snake_case = self.dropout(lowerCAmelCase_ ) # decoder: No padding present. _snake_case = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings _snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _snake_case = lyr( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0] _snake_case = self.decoder_norm(lowerCAmelCase_ ) _snake_case = self.post_dropout(lowerCAmelCase_ ) _snake_case = self.spec_out(lowerCAmelCase_ ) return spec_out class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple: super().__init__() _snake_case = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple: _snake_case = self.layer[0]( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) if encoder_hidden_states is not None: _snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _snake_case = self.layer[1]( lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) # Apply Film Conditional Feed Forward layer _snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ ) return (hidden_states,) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: super().__init__() _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str: # pre_self_attention_layer_norm _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ ) # Self-attention block _snake_case = self.attention(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: super().__init__() _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , 
lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict: _snake_case = self.layer_norm(lowerCAmelCase_ ) _snake_case = self.attention( lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return layer_output class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: super().__init__() _snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]: _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.DenseReluDense(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) _snake_case = NewGELUActivation() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any: _snake_case = self.act(self.wi_a(lowerCAmelCase_ ) ) _snake_case = self.wi_a(lowerCAmelCase_ ) _snake_case = hidden_gelu * hidden_linear _snake_case = self.dropout(lowerCAmelCase_ ) _snake_case = self.wo(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str: super().__init__() _snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) ) _snake_case = eps def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ ) _snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _snake_case = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class UpperCamelCase_ ( nn.Module ): def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) )) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = self.scale_bias(lowerCAmelCase_ ) _snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 ) _snake_case = x * (1 + scale) + shift return x
295
0
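The decoder sample above is hard to follow once its identifiers are mangled, so here is a de-obfuscated sketch of its two small building blocks: the T5-style RMS layer norm (scale-only, no mean subtraction or bias, accumulated in fp32) and the FiLM layer that maps a conditioning embedding to a per-channel scale and shift. Class and variable names below are mine; only the arithmetic is taken from the sample. Note also that in the gated feed-forward block of the sample, the two input projections are distinct layers (wi_0 and wi_1 in the original T5 code); the mangling collapses both onto one name, which would be a bug if run as written.

import torch
from torch import nn


class T5LayerNorm(nn.Module):
    """T5-style RMSNorm: scale-only, variance computed in fp32 for stability."""

    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # variance is computed without subtracting the mean (RMS norm)
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back to half precision if the weights are half precision
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


class FiLMLayer(nn.Module):
    """Feature-wise Linear Modulation: x -> x * (1 + scale) + shift."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning_emb), 2, dim=-1)
        return x * (1 + scale) + shift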
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of the force system is (approximately) zero."""
    # moments is the z-component of r x F for each (location, force) pair
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
351
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''gpt_neo''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple: _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_layers _snake_case = num_heads _snake_case = intermediate_size _snake_case = window_size _snake_case = activation_function _snake_case = resid_dropout _snake_case = embed_dropout _snake_case = attention_dropout _snake_case = classifier_dropout _snake_case = layer_norm_epsilon _snake_case = initializer_range _snake_case = use_cache _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = attention_types _snake_case = self.expand_attention_types_params(lowerCAmelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' import torch _snake_case = input.size() _snake_case = len(UpperCamelCase__ ) _snake_case = shape[dimension] _snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ ) _snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1 _snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None] _snake_case = [slice(UpperCamelCase__ )] * rank _snake_case = indices _snake_case = input[s] _snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str: '''simple docstring''' import torch _snake_case = torch.arange(1 , UpperCamelCase__ ) _snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = remainders == 0 _snake_case = candidates[divisor_indices] _snake_case = torch.max(UpperCamelCase__ ) return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' ) class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) _snake_case = {0: 'batch', 1: 'past_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCAmelCase ( self ) -> int: return self._config.num_heads def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: _snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) # We need to order the input in the way they appears in the forward() _snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers ) ] _snake_case = common_inputs['attention_mask'] if self.use_past: _snake_case = ordered_inputs['attention_mask'].dtype _snake_case = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self ) -> int: return 13
295
0
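One detail worth pulling out of the GPT-Neo configuration above is how `attention_types` such as [[["global", "local"], 12]] is expanded into the flat per-layer list that must match `num_layers`. A standalone sketch of that expansion (the free-function form and its name are mine; in the sample it is a static method on the config):

def expand_attention_types_params(attention_types):
    """Expand e.g. [[["global", "local"], 12]] into a flat list of 24 layer types."""
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions


assert expand_attention_types_params([[["global", "local"], 12]]) == ["global", "local"] * 12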
from __future__ import annotations

sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Look up primality of n in the precomputed sieve."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """A circular prime above 2 cannot contain an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return all circular primes below limit (every rotation is prime)."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the count of circular primes below one million."""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
352
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert each pixel to its negative by subtracting it from pure white."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
295
0
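The negative-image sample above loops over every pixel in Python, which is slow for real images; since the operation is elementwise, NumPy broadcasting does the same thing in one expression. A minimal sketch (function and variable names mine):

import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # for uint8 images, 255 - value is the per-channel negative
    return 255 - img


img = np.array([[[0, 128, 255]]], dtype=np.uint8)
assert (convert_to_negative_fast(img) == [[[255, 127, 0]]]).all()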
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
353
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' _snake_case = VideoMAEConfig() set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ ) if "finetuned" not in model_name: _snake_case = False if "finetuned" in model_name: _snake_case = 'huggingface/label-files' if "kinetics" in model_name: _snake_case = 400 _snake_case = 'kinetics400-id2label.json' elif "ssv2" in model_name: _snake_case = 174 _snake_case = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) _snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) _snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int: '''simple docstring''' if "small" in model_name: _snake_case = 384 _snake_case = 1_536 _snake_case = 12 _snake_case = 16 _snake_case = 12 _snake_case = 3 _snake_case = 192 _snake_case = 768 elif "large" in model_name: _snake_case = 1_024 _snake_case = 4_096 _snake_case = 24 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 512 _snake_case = 2_048 elif "huge" in model_name: _snake_case = 1_280 _snake_case = 5_120 _snake_case = 32 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 640 _snake_case = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' if "encoder." in name: _snake_case = name.replace('encoder.' 
, '' ) if "cls_token" in name: _snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: _snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: _snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: _snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: _snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: _snake_case = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: _snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: _snake_case = name.replace('attn' , 'attention.self' ) if "attn" in name: _snake_case = name.replace('attn' , 'attention.attention' ) if "norm1" in name: _snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: _snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: _snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: _snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: _snake_case = name.replace('head' , 'classifier' ) return name def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): _snake_case = orig_state_dict.pop(UpperCamelCase__ ) if key.startswith('encoder.' ): _snake_case = key.replace('encoder.' , '' ) if "qkv" in key: _snake_case = key.split('.' ) if key.startswith('decoder.blocks' ): _snake_case = config.decoder_hidden_size _snake_case = int(key_split[2] ) _snake_case = 'decoder.decoder_layers.' if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = config.hidden_size _snake_case = int(key_split[1] ) _snake_case = 'videomae.encoder.layer.' 
if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = val return orig_state_dict def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _snake_case = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) _snake_case = np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' _snake_case = get_videomae_config(UpperCamelCase__ ) if "finetuned" in model_name: _snake_case = VideoMAEForVideoClassification(UpperCamelCase__ ) else: _snake_case = VideoMAEForPreTraining(UpperCamelCase__ ) # download original checkpoint, hosted on Google Drive _snake_case = 'pytorch_model.bin' gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' ) if "model" in files: _snake_case = files['model'] else: _snake_case = files['module'] _snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # verify model on basic input _snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _snake_case = prepare_video() _snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' ) if "finetuned" not in model_name: _snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) _snake_case = torch.load(UpperCamelCase__ ) _snake_case = model(**UpperCamelCase__ ) _snake_case = outputs.logits _snake_case = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one _snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == 
"videomae-large-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": _snake_case = outputs.loss assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(UpperCamelCase__ , organization='nielsr' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
295
0
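The trickiest step in the VideoMAE conversion script above is splitting the original checkpoint's fused qkv projection into the separate query/key/value matrices the converted model expects. A minimal sketch of just that slicing, pulled out of the full renaming logic (function and variable names mine):

import torch


def split_qkv(qkv_weight: torch.Tensor, dim: int):
    """A fused (3*dim, dim) qkv weight stores q, k, v stacked along dim 0."""
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    return q, k, v


qkv = torch.arange(6 * 2, dtype=torch.float32).reshape(6, 2)  # dim = 2
q, k, v = split_qkv(qkv, 2)
assert q.shape == k.shape == v.shape == (2, 2)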
from __future__ import annotations from typing import Any class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 ) -> None: _snake_case , _snake_case = row, column _snake_case = [[default_value for c in range(lowerCAmelCase_ )] for r in range(lowerCAmelCase_ )] def __str__( self ) -> str: _snake_case = F'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier _snake_case = 0 for row_vector in self.array: for obj in row_vector: _snake_case = max(lowerCAmelCase_ , len(str(lowerCAmelCase_ ) ) ) _snake_case = F'''%{max_element_length}s''' # Make string and return def single_line(lowerCAmelCase_ ) -> str: nonlocal string_format_identifier _snake_case = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCAmelCase_ ) for row_vector in self.array ) return s def __repr__( self ) -> str: return str(self ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> bool: if not (isinstance(lowerCAmelCase_ , (list, tuple) ) and len(lowerCAmelCase_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCAmelCase_ ) -> Any: assert self.validate_indicies(lowerCAmelCase_ ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: assert self.validate_indicies(lowerCAmelCase_ ) _snake_case = value def __add__( self , lowerCAmelCase_ ) -> Matrix: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) assert self.row == another.row and self.column == another.column # Add _snake_case = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _snake_case = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: _snake_case = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _snake_case = -self[r, c] return result def __sub__( self , lowerCAmelCase_ ) -> Matrix: return self + (-another) def __mul__( self , lowerCAmelCase_ ) -> Matrix: if isinstance(lowerCAmelCase_ , (int, float) ): # Scalar multiplication _snake_case = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _snake_case = self[r, c] * another return result elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # Matrix multiplication assert self.column == another.row _snake_case = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: _snake_case = F'''Unsupported type given for another ({type(lowerCAmelCase_ )})''' raise TypeError(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Matrix: _snake_case = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): _snake_case = self[r, c] return result def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate _snake_case = v.transpose() _snake_case = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCamelCase__ ( ) -> 
None: '''simple docstring''' _snake_case = Matrix(3 , 3 , 0 ) for i in range(3 ): _snake_case = 1 print(F'''a^(-1) is {ainv}''' ) # u, v _snake_case = Matrix(3 , 1 , 0 ) _snake_case , _snake_case , _snake_case = 1, 2, -3 _snake_case = Matrix(3 , 1 , 0 ) _snake_case , _snake_case , _snake_case = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCamelCase__ , UpperCamelCase__ )}''' ) def lowerCamelCase__ ( ) -> None: '''simple docstring''' import doctest doctest.testmod() testa()
354
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , 
config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
295
0
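The sherman_morrison method in the matrix sample above implements the identity (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u), applied to an already-inverted matrix, and returns None when the denominator vanishes. A quick NumPy check of the same formula (names and the random test data are mine):

import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()
assert abs(denom) > 1e-9  # the update is only valid when the denominator is nonzero
sherman_morrison = a_inv - (a_inv @ u @ v.T @ a_inv) / denom

assert np.allclose(sherman_morrison, np.linalg.inv(a + u @ v.T))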
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Dict: '''simple docstring''' return 1 / (1 + np.exp(-z )) def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> str: '''simple docstring''' return (-y * np.log(UpperCamelCase__ ) - (1 - y) * np.log(1 - h )).mean() def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> int: '''simple docstring''' _snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ ) return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase__ ) ) ) def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Any=70_000 ) -> Tuple: '''simple docstring''' _snake_case = np.zeros(x.shape[1] ) for iterations in range(UpperCamelCase__ ): _snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = sigmoid_function(UpperCamelCase__ ) _snake_case = np.dot(x.T , h - y ) / y.size _snake_case = theta - alpha * gradient # updating the weights _snake_case = np.dot(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = sigmoid_function(UpperCamelCase__ ) _snake_case = cost_function(UpperCamelCase__ , UpperCamelCase__ ) if iterations % 100 == 0: print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCAmelCase_ = datasets.load_iris() UpperCAmelCase_ = iris.data[:, :2] UpperCAmelCase_ = (iris.target != 0) * 1 UpperCAmelCase_ = 0.1 UpperCAmelCase_ = logistic_reg(alpha, x, y, max_iterations=70000) print("""theta: """, theta) # printing the theta i.e our weights vector def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Any: '''simple docstring''' return sigmoid_function( np.dot(UpperCamelCase__ , UpperCamelCase__ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""") plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""") ((UpperCAmelCase_) , (UpperCAmelCase_)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCAmelCase_) , (UpperCAmelCase_)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCAmelCase_) , (UpperCAmelCase_)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCAmelCase_ = np.c_[xxa.ravel(), xxa.ravel()] UpperCAmelCase_ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""") plt.legend() plt.show()
355
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Check whether next_ver can extend the partial Hamiltonian path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper; fills path in place."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle starting at start_index, or [] if none exists."""
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
295
0
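A small usage check for the Hamiltonian-cycle sample above, on a 5-vertex graph that does contain a cycle (the adjacency matrix is my own test case, not from the sample):

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]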
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(
        default="question-answering-extractive",
        metadata={"include_in_asdict_even_if_is_default": True},
    )
    input_schema: ClassVar[Features] = Features(
        {"question": Value("string"), "context": Value("string")}
    )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.question_column: "question",
            self.context_column: "context",
            self.answers_column: "answers",
        }
356
import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]: '''simple docstring''' _snake_case = OmegaConf.load(UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model'] _snake_case = list(state_dict.keys() ) # extract state_dict for VQVAE _snake_case = {} _snake_case = 'first_stage_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] # extract state_dict for UNetLDM _snake_case = {} _snake_case = 'model.diffusion_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] _snake_case = config.model.params.first_stage_config.params _snake_case = config.model.params.unet_config.params _snake_case = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _snake_case = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _snake_case = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) UpperCAmelCase_ = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
295
0
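The LDM conversion script above relies on one reusable pattern: carving a flat checkpoint into per-module state dicts by key prefix. A generic sketch of that step (function name mine; whether the prefix is stripped depends on the key names the target module expects):

def subdict_by_prefix(state_dict, prefix, strip_prefix=True):
    """Collect all entries whose key starts with `prefix`."""
    return {
        (key[len(prefix):] if strip_prefix else key): value
        for key, value in state_dict.items()
        if key.startswith(prefix)
    }


sd = {"first_stage_model.encoder.w": 1, "model.diffusion_model.out.b": 2}
assert subdict_by_prefix(sd, "first_stage_model.") == {"encoder.w": 1}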
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = DebertaTokenizer lowerCAmelCase_ = True lowerCAmelCase_ = DebertaTokenizerFast def lowerCAmelCase ( self ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '[UNK]'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = 'lower newer' _snake_case = 'lower newer' return input_text, output_text def lowerCAmelCase ( self ) -> Tuple: _snake_case = self.get_tokenizer() _snake_case = 'lower newer' _snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokens + [tokenizer.unk_token] _snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.get_tokenizer() _snake_case = tokenizer('Hello' , 'World' ) _snake_case = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['token_type_ids'] , lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> Any: _snake_case = self.tokenizer_class.from_pretrained('microsoft/deberta-base' ) _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode( 'sequence builders' , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) _snake_case = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: _snake_case = tokenizer_class.from_pretrained('microsoft/deberta-base' ) _snake_case = [ 'ALBERT: A Lite BERT for Self-supervised 
Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) for seq in encoding['input_ids']] # fmt: off _snake_case = { 'input_ids': [ [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2] ], 'token_type_ids': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on _snake_case = [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] self.assertDictEqual(encoding.data , lowerCAmelCase_ ) for expected, decoded in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
357
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase_ : @property def lowerCAmelCase ( self ) -> int: return self.get_dummy_input() @property def lowerCAmelCase ( self ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]: _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) return dummy_input def lowerCAmelCase ( self ) -> Tuple: _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) unet_block.to(lowerCAmelCase_ ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowerCAmelCase ( self ) -> Tuple: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() _snake_case = model(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ ) _snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward()
295
0
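The DeBERTa tokenizer test above builds its fixture by writing a toy GPT-2-style vocabulary and merges file to disk; the same pattern works for any byte-level BPE tokenizer test. A minimal stdlib-only sketch (the paths, tokens, and merges here are illustrative, not the test's real fixture):

import json
import os
import tempfile

vocab = {tok: i for i, tok in enumerate(["l", "o", "w", "er", "\u0120", "[UNK]"])}
merges = ["#version: 0.2", "e r"]

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))
# a BPE tokenizer class could now be instantiated from tmpdir with unk_token="[UNK]"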
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points on the Earth's surface."""
    # account for flattening by converting geodetic latitudes to reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # Build an undirected adjacency list: record each edge in both directions
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # The MST may report an edge in either direction, so check both orientations
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
295
0
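As a quick sanity check on the `haversine_distance` function above, a usage sketch (the coordinates are San Francisco and Yosemite; the ~254 km figure is an approximation, not a value taken from this corpus):

# Rough usage sketch for haversine_distance; expected magnitude is ~254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)

distance_m = haversine_distance(*SAN_FRANCISCO, *YOSEMITE)
print(f"{distance_m:0,.0f} meters")  # expect roughly 254,000 m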
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''altclip_text_model''' def __init__( self , lowerCAmelCase_=25_0002 , lowerCAmelCase_=1024 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_=4096 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=514 , lowerCAmelCase_=1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-05 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , lowerCAmelCase_=768 , **lowerCAmelCase_ , ) -> Optional[int]: super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = initializer_factor _snake_case = layer_norm_eps _snake_case = position_embedding_type _snake_case = use_cache _snake_case = project_dim class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''altclip_vision_model''' def __init__( self , lowerCAmelCase_=768 , lowerCAmelCase_=3072 , lowerCAmelCase_=512 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3 , lowerCAmelCase_=224 , lowerCAmelCase_=32 , lowerCAmelCase_="quick_gelu" , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1.0 , **lowerCAmelCase_ , ) -> int: super().__init__(**lowerCAmelCase_ ) _snake_case = hidden_size _snake_case = intermediate_size _snake_case = projection_dim _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = num_channels _snake_case = patch_size _snake_case = image_size _snake_case = initializer_range _snake_case = initializer_factor _snake_case = attention_dropout _snake_case = layer_norm_eps _snake_case = hidden_act @classmethod def lowerCAmelCase ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(lowerCAmelCase_ ) _snake_case , _snake_case = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": _snake_case = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''altclip''' lowerCAmelCase_ = True def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=768 , lowerCAmelCase_=2.65_92 , **lowerCAmelCase_ ) -> List[Any]: # If `_config_dict` exist, we use them for the backward compatibility. 
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). _snake_case = kwargs.pop('text_config_dict' , lowerCAmelCase_ ) _snake_case = kwargs.pop('vision_config_dict' , lowerCAmelCase_ ) super().__init__(**lowerCAmelCase_ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _snake_case = {} # This is the complete result when using `text_config_dict`. _snake_case = AltCLIPTextConfig(**lowerCAmelCase_ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _snake_case = ( F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' F'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _snake_case = ( F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' F'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase_ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: _snake_case = {} # This is the complete result when using `vision_config_dict`. _snake_case = AltCLIPVisionConfig(**lowerCAmelCase_ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _snake_case = { str(lowerCAmelCase_ ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _snake_case = ( F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' F'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _snake_case = ( F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' F'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase_ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: _snake_case = {} logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: _snake_case = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' 
) _snake_case = AltCLIPTextConfig(**lowerCAmelCase_ ) _snake_case = AltCLIPVisionConfig(**lowerCAmelCase_ ) _snake_case = projection_dim _snake_case = logit_scale_init_value _snake_case = 1.0 @classmethod def lowerCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Union[str, Any]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = copy.deepcopy(self.__dict__ ) _snake_case = self.text_config.to_dict() _snake_case = self.vision_config.to_dict() _snake_case = self.__class__.model_type return output
359
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
295
0
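The `allow_empty_subarrays` flag in `max_subarray_sum` above only matters for all-negative input, where the empty subarray (sum 0) beats every non-empty one. A small sketch:

# With only negative numbers, the best non-empty subarray is the single
# largest element, while permitting the empty subarray yields 0.
nums = [-8, -3, -6]
print(max_subarray_sum(nums))                              # -3 (best non-empty subarray)
print(max_subarray_sum(nums, allow_empty_subarrays=True))  # 0 (empty subarray wins)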
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
360
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCamelCase_ ( enum.Enum ): lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2 @add_end_docstrings(_lowerCamelCase ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _snake_case = None if self.model.config.prefix is not None: _snake_case = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _snake_case = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params ) _snake_case = {**self._preprocess_params, **preprocess_params} _snake_case = {**self._forward_params, **forward_params} def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple: _snake_case = {} if prefix is not None: _snake_case = prefix if prefix: _snake_case = self.tokenizer( lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) _snake_case = handle_long_generation preprocess_params.update(lowerCAmelCase_ ) _snake_case = generate_kwargs _snake_case = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.TENSORS if return_type is not None: _snake_case = return_type if clean_up_tokenization_spaces is not None: _snake_case = clean_up_tokenization_spaces if stop_sequence is not None: _snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) _snake_case = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any: _snake_case = self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prompt_text if handle_long_generation == "hole": _snake_case = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _snake_case = generate_kwargs['max_new_tokens'] else: _snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _snake_case = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) _snake_case = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _snake_case = inputs['attention_mask'][:, -keep_length:] return inputs def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: _snake_case = model_inputs['input_ids'] _snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ ) # Allow empty prompts if input_ids.shape[1] == 0: _snake_case = None _snake_case = None _snake_case = 1 else: _snake_case = input_ids.shape[0] _snake_case = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_snake_case = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: _snake_case = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _snake_case = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = generated_sequence.shape[0] if self.framework == "pt": _snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int: _snake_case = model_outputs['generated_sequence'][0] _snake_case = model_outputs['input_ids'] _snake_case = model_outputs['prompt_text'] _snake_case = generated_sequence.numpy().tolist() _snake_case = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _snake_case = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _snake_case = self.tokenizer.decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _snake_case = 0 else: _snake_case = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) ) if return_type == ReturnType.FULL_TEXT: _snake_case = prompt_text + text[prompt_length:] else: _snake_case = text[prompt_length:] _snake_case = {'generated_text': all_text} records.append(lowerCAmelCase_ ) return records
295
0
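A minimal usage sketch for the text-generation pipeline defined above (the `gpt2` checkpoint is just a common example; any causal LM works):

# Generate a continuation and strip the prompt from the output, i.e. the
# ReturnType.NEW_TEXT path handled by _sanitize_parameters above.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Once upon a time,",
    max_new_tokens=20,
    return_full_text=False,
)
print(outputs[0]["generated_text"])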
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": 
"""kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern 
Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''facebook/nllb-200-distilled-600M''' lowerCAmelCase_ = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) lowerCAmelCase_ = '''translator''' lowerCAmelCase_ = AutoTokenizer lowerCAmelCase_ = AutoModelForSeqaSeqLM lowerCAmelCase_ = LANGUAGE_CODES lowerCAmelCase_ = ['''text''', '''text''', '''text'''] lowerCAmelCase_ = ['''text'''] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) _snake_case = self.lang_to_code[src_lang] _snake_case = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCAmelCase_ , return_tensors='pt' , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.model.generate(**lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase_ )
361
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
295
0
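Since the google_bleu metric above delegates to nltk's `gleu_score`, the same score can be computed directly; a sketch with toy token lists (not data from this corpus):

# Direct GLEU computation, mirroring the metric's _compute call above.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]

score = gleu_score.corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
print(round(score, 2))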
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any]=None ) -> List[Any]: '''simple docstring''' _snake_case = argparse.ArgumentParser(add_help=UpperCamelCase__ , allow_abbrev=UpperCamelCase__ ) # The main config parser _snake_case = config_command_parser(UpperCamelCase__ ) # The subparser to add commands to _snake_case = config_parser.add_subparsers(title='subcommands' , dest='subcommand' ) # Then add other parsers with the parent parser default_command_parser(UpperCamelCase__ , parents=[parent_parser] ) update_command_parser(UpperCamelCase__ , parents=[parent_parser] ) return config_parser def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = get_config_parser() _snake_case = config_parser.parse_args() if not hasattr(UpperCamelCase__ , 'func' ): config_parser.print_help() exit(1 ) # Run args.func(UpperCamelCase__ ) if __name__ == "__main__": main()
362
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
295
0
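To make the BPE plumbing in the BART tokenizer above concrete, a standalone sketch of the pair-extraction step that drives the merge loop (this mirrors the `get_pairs` helper defined there):

# get_pairs enumerates adjacent symbol pairs in a word; bpe() then repeatedly
# merges the lowest-ranked pair until no known merge remains.
def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


print(get_pairs(tuple("hello")))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}  -- set order may vary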
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , 
config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
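The `problem_type` dispatch in the classification head above is a pattern worth isolating. Below is a minimal standalone sketch of the same three-way loss selection; the function name and inline example are illustrative, not part of the transformers API.

# Minimal sketch of the problem_type dispatch used above; names here are
# illustrative, not part of the transformers API.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss


def pick_classification_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
    if num_labels == 1:
        # regression: one scalar target per example
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        # single-label classification: integer class indices
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # multi-label classification: float multi-hot targets
    return BCEWithLogitsLoss()(logits, labels.float())


logits = torch.randn(4, 10)
print(pick_classification_loss(logits, torch.randint(0, 10, (4,)), num_labels=10))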
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy the weights of an old-structure ProphetNet checkpoint into the current model structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    # "feed_forward" maps to the empty string: stay at the current old module
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()

    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
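The heart of the conversion above is slicing one fused `in_proj` matrix into separate query/key/value projections. A toy sketch of that slicing convention, with made-up dimensions:

# Toy illustration of splitting a fused in_proj weight into q/k/v, as the
# conversion above does; the shapes and the stand-in Linear are hypothetical.
import torch
from torch import nn

embed_dim = 8
fused = nn.Linear(embed_dim, 3 * embed_dim)  # stand-in for the old in_proj

q_w = nn.Parameter(fused.weight[:embed_dim, :])
k_w = nn.Parameter(fused.weight[embed_dim : 2 * embed_dim, :])
v_w = nn.Parameter(fused.weight[2 * embed_dim :, :])

assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)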
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=32 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=[10, 20, 30, 40] , lowerCAmelCase_=[2, 2, 3, 2] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=["stage2", "stage3", "stage4"] , lowerCAmelCase_=[2, 3, 4] , lowerCAmelCase_=None , ) -> int: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = num_labels _snake_case = initializer_range _snake_case = out_features _snake_case = out_indices _snake_case = scope def lowerCAmelCase ( self ) -> Any: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> List[Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _snake_case = ConvNextModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = ConvNextForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _snake_case = ConvNextBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , 
[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case = None _snake_case = ConvNextBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) lowerCAmelCase_ = ( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> Dict: _snake_case = ConvNextModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self ) -> Tuple: return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> List[str]: pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Any: pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def lowerCAmelCase ( self ) -> int: pass def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> str: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _snake_case = 
model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = ConvNextModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def lowerCAmelCase ( self ) -> Any: _snake_case = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(lowerCAmelCase_ ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): _snake_case = model(**lowerCAmelCase_ ) # verify the logits _snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase , _lowerCamelCase ): lowerCAmelCase_ = (ConvNextBackbone,) if is_torch_available() else () lowerCAmelCase_ = ConvNextConfig lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> Dict: _snake_case = ConvNextModelTester(self )
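A quick sanity check for the spatial sizes asserted in the tests above: ConvNext's stem patchifies by 4 and each subsequent stage halves the grid, which is the assumption this sketch encodes.

# Back-of-the-envelope check for the spatial sizes asserted above: a 4x
# patchify stem followed by 2x downsampling per stage (an assumption of this
# sketch, matching the image_size // 4 check in the test).
def expected_spatial_sizes(image_size: int, num_stages: int) -> list:
    sizes = []
    current = image_size // 4  # stem / patchify
    for _ in range(num_stages):
        sizes.append(current)
        current //= 2
    return sizes


print(expected_spatial_sizes(32, 4))  # [8, 4, 2, 1] for the tester's 32px inputs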
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """
    Generate a random graph with `vertices_number` nodes: each pair of nodes is
    connected with the given probability.
    """
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each pair of nodes, add an edge from u to v
    # if the randomly generated number is less than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, too
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph with `vertices_number` nodes.

    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
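Example usage of the generator above (the edge set depends on the seed):

# Assumes the functions above are importable under the reconstructed names.
import random

random.seed(0)  # make the draw reproducible
g = random_graph(5, 0.5)
print(g)  # e.g. an adjacency dict such as {0: [1, 4], 1: [0, ...], ...}
print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}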
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=33 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_input_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = scope def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_input_mask: _snake_case = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = ids_tensor([self.batch_size] , self.num_choices ) _snake_case = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self ) -> Optional[Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _snake_case = EsmModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = EsmForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _snake_case = self.num_labels _snake_case = EsmForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = False lowerCAmelCase_ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase_ = () lowerCAmelCase_ = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> str: _snake_case = EsmModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case = type self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> Tuple: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = EsmModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> str: _snake_case = self.model_tester.prepare_config_and_inputs()[0] _snake_case = EsmEmbeddings(config=lowerCAmelCase_ ) _snake_case = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) _snake_case = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) _snake_case = create_position_ids_from_input_ids(lowerCAmelCase_ , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs()[0] _snake_case = EsmEmbeddings(config=lowerCAmelCase_ ) _snake_case = torch.empty(2 , 4 , 30 ) _snake_case = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] _snake_case = torch.as_tensor([expected_single_positions, expected_single_positions] ) _snake_case = embeddings.create_position_ids_from_inputs_embeds(lowerCAmelCase_ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase ( self ) -> Optional[int]: pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCAmelCase ( self ) -> List[str]: pass @require_torch class UpperCamelCase_ ( _lowerCamelCase ): @slow def lowerCAmelCase ( self ) -> List[Any]: with torch.no_grad(): _snake_case = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case = model(lowerCAmelCase_ )[0] _snake_case = 33 _snake_case = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: with torch.no_grad(): _snake_case = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _snake_case = model(lowerCAmelCase_ )[0] # compare the actual values for a slice. _snake_case = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
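The position-id tests above pin down a convention: padded positions keep `padding_idx`, and real tokens count up from `padding_idx + 1`. A minimal reimplementation sketch of that convention (my own function, not the library's exact signature):

# Minimal sketch of the position-id convention the tests above verify:
# padding positions keep padding_idx, real tokens count up from padding_idx + 1.
import torch


def create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental = torch.cumsum(mask, dim=1) * mask
    return incremental.long() + padding_idx


ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the role of padding_idx here
print(create_position_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])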
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = encoder_stride _snake_case = num_attention_outputs _snake_case = embed_dim _snake_case = embed_dim + 1 _snake_case = resolution _snake_case = depths _snake_case = hidden_sizes _snake_case = dim _snake_case = mlp_expansion_ratio def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> Tuple: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _snake_case = self.type_sequence_label_size _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Optional[Any]: pass def lowerCAmelCase ( self ) -> str: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): _snake_case = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: _snake_case = seq_length * self.model_tester.chunk_length else: _snake_case = self.model_tester.seq_length 
self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _snake_case = outputs.decoder_hidden_states self.asseretIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): _snake_case = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self ) -> Dict: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _snake_case = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _snake_case = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _snake_case = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Dict: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
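The last test above builds symbolic Keras inputs with `None` dimensions from the model's input signature. A compact sketch of that functional-construction trick; the signature below is a stand-in, not the real EfficientFormer one.

# Sketch of the "maximally general" functional-construction trick used in the
# last test: build symbolic inputs with None dimensions from an input signature.
import tensorflow as tf

input_signature = {"pixel_values": tf.TensorSpec((None, 3, None, None), tf.float32)}

symbolic_inputs = {
    name: tf.keras.Input(shape=tuple(spec.shape[1:]), dtype=spec.dtype, name=name)
    for name, spec in input_signature.items()
}
print(symbolic_inputs["pixel_values"].shape)  # (None, 3, None, None)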
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} UpperCAmelCase_ = { """vocab_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json""" ), }, """merges_file""": { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""", """allenai/longformer-large-4096""": ( """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt""" ), }, } UpperCAmelCase_ = { """allenai/longformer-base-4096""": 4096, """allenai/longformer-large-4096""": 4096, """allenai/longformer-large-4096-finetuned-triviaqa""": 4096, """allenai/longformer-base-4096-extra.pos.embd.only""": 4096, """allenai/longformer-large-4096-extra.pos.embd.only""": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase__ ( ) -> Dict: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : str ) -> Any: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Optional[int]: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> int: return len(self.encoder ) def lowerCAmelCase ( self ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path 
({save_directory}) should be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
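The tokenizer's `bpe` method above repeatedly fuses the best-ranked adjacent pair until no ranked pair remains. A tiny self-contained demonstration of the same merge loop, with an invented rank table:

# Tiny demonstration of the merge loop above with a hand-made rank table;
# the ranks here are invented for illustration.
def bpe_merge(word: tuple, ranks: dict) -> tuple:
    if len(word) < 2:
        return word
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
        if len(word) == 1:
            break
    return word


# With these ranks, "lower" collapses to ('low', 'er').
print(bpe_merge(tuple("lower"), {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}))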
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> List[str]: super().setUp() _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '<unk>'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def lowerCAmelCase ( self ) -> Union[str, Any]: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertNotIn('labels' , lowerCAmelCase_ ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , 
return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.'] _snake_case = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' ) _snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' ) _snake_case = inputs['input_ids'] _snake_case = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = ['Summary of the text.', 'Another summary.'] _snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']] _snake_case = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Tuple: pass def lowerCAmelCase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = 'A, <mask> AllenNLP sentence.' _snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
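The final assertion above checks `tokenizer.pad`'s handling of `global_attention_mask`: shorter rows are extended to the batch maximum with -1. A sketch of just that padding step:

# Sketch of the padding behavior the last test checks: shorter
# global_attention_mask rows are padded to the batch max with -1
# (the value the LED tokenizer's pad uses for that key).
def pad_global_attention_masks(masks: list, pad_value: int = -1) -> list:
    max_len = max(len(m) for m in masks)
    return [m + [pad_value] * (max_len - len(m)) for m in masks]


print(pad_global_attention_masks([[0] * 7, [0] * 5]))
# [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]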
295
0
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=99 , lowerCAmelCase_=13 , lowerCAmelCase_=16 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_=30 , lowerCAmelCase_=0 , lowerCAmelCase_=1 , lowerCAmelCase_=2 , lowerCAmelCase_=None , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = decoder_seq_length # For common tests _snake_case = self.decoder_seq_length _snake_case = is_training _snake_case = use_attention_mask _snake_case = use_labels _snake_case = vocab_size _snake_case = d_model _snake_case = d_model _snake_case = decoder_layers _snake_case = decoder_layers _snake_case = decoder_ffn_dim _snake_case = decoder_attention_heads _snake_case = decoder_attention_heads _snake_case = eos_token_id _snake_case = bos_token_id _snake_case = pad_token_id _snake_case = decoder_start_token_id _snake_case = use_cache _snake_case = max_position_embeddings _snake_case = None _snake_case = decoder_seq_length _snake_case = 2 _snake_case = 1 def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _snake_case = None if self.use_attention_mask: _snake_case = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _snake_case = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> List[str]: _snake_case = True _snake_case = TrOCRDecoder(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).eval() _snake_case = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _snake_case = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) ) self.parent.assertTrue(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) + 1 ) _snake_case = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids _snake_case = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and _snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) _snake_case = model(lowerCAmelCase_ )['last_hidden_state'] _snake_case = model(lowerCAmelCase_ , 
past_key_values=lowerCAmelCase_ )['last_hidden_state'] # select random slice _snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() _snake_case = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _snake_case = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) def lowerCAmelCase ( self ) -> Any: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase_ = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase_ = True lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> Tuple: _snake_case = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCAmelCase_ ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: pass def lowerCAmelCase ( self ) -> str: pass def lowerCAmelCase ( self ) -> int: pass def lowerCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def lowerCAmelCase ( self ) -> Optional[int]: pass
367
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = BertTokenizer lowerCAmelCase_ = BertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english def lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _snake_case = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = 'UNwant\u00E9d,running' _snake_case = 'unwanted, running' return input_text, output_text def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.tokenizer_class(self.vocab_file ) _snake_case = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def lowerCAmelCase ( self ) -> Tuple: if not self.test_rust_tokenizer: return _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing _snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase ( self ) -> Any: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Dict: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer() _snake_case = 'a\n\'ll !!to?\'d of, can\'t.' 
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _snake_case = {} for i, token in enumerate(lowerCAmelCase_ ): _snake_case = i _snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase ( self ) -> Tuple: self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase ( self ) -> Dict: self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase ( self ) -> int: self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' ) _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def lowerCAmelCase ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _snake_case = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) _snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False _snake_case = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), 
tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase ( self ) -> str: _snake_case = ['的', '人', '有'] _snake_case = ''.join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = True _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". _snake_case = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
295
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available UpperCAmelCase_ = { """configuration_audio_spectrogram_transformer""": [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ASTConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ASTForAudioClassification""", """ASTModel""", """ASTPreTrainedModel""", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""ASTFeatureExtractor"""] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
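For reference, the lazy-import pattern used by `_LazyModule` above can be sketched with PEP 562's module-level `__getattr__`. This is an illustrative stand-in, not the actual transformers implementation, and the package layout and symbol names are hypothetical:

# lazy_pkg/__init__.py -- hypothetical package illustrating deferred imports
import importlib

_import_structure = {"modeling": ["ASTModel"], "feature_extraction": ["ASTFeatureExtractor"]}

def __getattr__(name):
    # Import the owning submodule only when one of its symbols is first accessed.
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Heavy optional dependencies (torch, the speech extras) are then paid for only on first attribute access, which is the point of the `_import_structure` indirection above.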
368
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None: warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
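The class above is a deprecation alias: it subclasses the replacement and emits a FutureWarning at construction. A minimal self-contained sketch of the same pattern, with hypothetical class names:

import warnings

class NewImageProcessor:
    # Stand-in for the replacement implementation.
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    # Deprecated alias: warn once at construction, then defer to the parent.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then behaves like NewImageProcessor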
295
0
from __future__ import annotations def lowerCamelCase__ ( UpperCamelCase__ : int ) -> list[int]: '''simple docstring''' _snake_case = 2 _snake_case = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(UpperCamelCase__ ) if n > 1: factors.append(UpperCamelCase__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
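De-mangled, the function above is trial-division prime factorization. A runnable sketch with conventional names, implementing exactly the algorithm visible in the snippet:

def prime_factors(n: int) -> list[int]:
    # Divide out each factor starting from 2; any remainder > 1 is itself prime.
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]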
369
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: with TemporaryDirectory() as tmp_dir: _snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) _snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ ) _snake_case = builder_cls( cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , ) _snake_case = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) _snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) self.assertTrue(os.path.exists(lowerCAmelCase_ ) ) @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' _snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' _snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _snake_case = None builder_instance.download_and_prepare() _snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _snake_case = dataset_module_factory('wikipedia' , 
cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) _snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert "train" in ds assert isinstance(ds['train'] , UpperCamelCase__ ) assert next(iter(ds['train'] ) )
295
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase = { """configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""], """tokenization_tapas""": ["""TapasTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ """TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TapasForMaskedLM""", """TapasForQuestionAnswering""", """TapasForSequenceClassification""", """TapasModel""", """TapasPreTrainedModel""", """load_tf_weights_in_tapas""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ """TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFTapasForMaskedLM""", """TFTapasForQuestionAnswering""", """TFTapasForSequenceClassification""", """TFTapasModel""", """TFTapasPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
370
def lowerCamelCase__ ( ) -> int: '''simple docstring''' return 1 def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int = 200 ) -> int: '''simple docstring''' return two_pound(UpperCamelCase__ ) if __name__ == "__main__": print(solution(int(input().strip())))
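The chain of pence/pound functions above counts the ways to make an amount out of the UK coin denominations (Project Euler problem 31). An equivalent iterative sketch that avoids the deep recursion:

def count_coin_combinations(target: int = 200) -> int:
    # Classic coin-change DP: ways[a] holds the number of ways to form amount a.
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

assert count_coin_combinations(200) == 73682  # the value solution(200) computes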
295
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> Dict: '''simple docstring''' _snake_case = 384 if "tiny" in model_name: _snake_case = [3, 3, 9, 3] _snake_case = [96, 192, 384, 768] if "small" in model_name: _snake_case = [3, 3, 27, 3] _snake_case = [96, 192, 384, 768] if "base" in model_name: _snake_case = [3, 3, 27, 3] _snake_case = [128, 256, 512, 1_024] _snake_case = 512 if "large" in model_name: _snake_case = [3, 3, 27, 3] _snake_case = [192, 384, 768, 1_536] _snake_case = 768 if "xlarge" in model_name: _snake_case = [3, 3, 27, 3] _snake_case = [256, 512, 1_024, 2_048] _snake_case = 1_024 # set label information _snake_case = 150 _snake_case = 'huggingface/label-files' _snake_case = 'ade20k-id2label.json' _snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) _snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _snake_case = {v: k for k, v in idalabel.items()} _snake_case = ConvNextConfig( depths=UpperCamelCase__ , hidden_sizes=UpperCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) _snake_case = UperNetConfig( backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , ) return config def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] ) -> str: '''simple docstring''' _snake_case = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) 
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _snake_case = dct.pop(UpperCamelCase__ ) _snake_case = val def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ) -> Dict: '''simple docstring''' _snake_case = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } _snake_case = model_name_to_url[model_name] _snake_case = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['state_dict'] _snake_case = get_upernet_config(UpperCamelCase__ ) _snake_case = UperNetForSemanticSegmentation(UpperCamelCase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _snake_case = state_dict.pop(UpperCamelCase__ ) if "bn" in key: _snake_case = key.replace('bn' , 'batch_norm' ) _snake_case = val # rename keys _snake_case = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify on image _snake_case = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' _snake_case = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' ) _snake_case = SegformerImageProcessor() _snake_case = processor(UpperCamelCase__ , return_tensors='pt' ).pixel_values with torch.no_grad(): _snake_case = model(UpperCamelCase__ ) if model_name == "upernet-convnext-tiny": 
_snake_case = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": _snake_case = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": _snake_case = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": _snake_case = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": _snake_case = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(F'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(F'''openmmlab/{model_name}''' ) processor.push_to_hub(F'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-convnext-tiny""", type=str, choices=[F"upernet-convnext-{size}" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]], help="""Name of the ConvNext UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
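The rename_key helper in the conversion script simply pops a tensor from the state dict and re-inserts it under the new name produced by create_rename_keys. A tiny sketch of that step:

def rename_key(state_dict: dict, old: str, new: str) -> None:
    # Move the value stored under `old` to the key `new`.
    state_dict[new] = state_dict.pop(old)

sd = {"backbone.norm0.weight": "tensor"}
rename_key(sd, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
assert list(sd) == ["backbone.hidden_states_norms.stage1.weight"]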
371
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple: '''simple docstring''' _snake_case = [0 for i in range(r + 1 )] # nc0 = 1 _snake_case = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _snake_case = min(UpperCamelCase__ , UpperCamelCase__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
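The routine above computes C(n, r) by building one row of Pascal's triangle in place, updating right to left so each entry still reads the previous row's values. A cleaned-up sketch:

def binomial_coefficient(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]  # Pascal's rule: C(i, j) = C(i-1, j) + C(i-1, j-1)
            j -= 1
    return c[r]

assert binomial_coefficient(10, 5) == 252  # the value the snippet prints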
295
0
from typing import List import numpy as np def lowerCamelCase__ ( UpperCamelCase__ : dict ) -> int: '''simple docstring''' _snake_case = {key: len(UpperCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) _snake_case = max(lists_lengths.values() , default=0 ) return max(1 , UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> List[range]: '''simple docstring''' _snake_case = [] for group_idx in range(UpperCamelCase__ ): _snake_case = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _snake_case = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _snake_case = range(UpperCamelCase__ , start + num_shards_to_add ) shards_indices_per_group.append(UpperCamelCase__ ) return shards_indices_per_group def lowerCamelCase__ ( UpperCamelCase__ : dict , UpperCamelCase__ : int ) -> List[dict]: '''simple docstring''' _snake_case = _number_of_shards_in_gen_kwargs(UpperCamelCase__ ) if num_shards == 1: return [dict(UpperCamelCase__ )] else: _snake_case = _distribute_shards(num_shards=UpperCamelCase__ , max_num_jobs=UpperCamelCase__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(UpperCamelCase__ ) ) ] def lowerCamelCase__ ( UpperCamelCase__ : List[dict] ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , UpperCamelCase__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowerCamelCase__ ( UpperCamelCase__ : np.random.Generator , UpperCamelCase__ : dict ) -> dict: '''simple docstring''' _snake_case = {len(UpperCamelCase__ ) for value in gen_kwargs.values() if isinstance(UpperCamelCase__ , UpperCamelCase__ )} _snake_case = {} for size in list_sizes: _snake_case = list(range(UpperCamelCase__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _snake_case = dict(UpperCamelCase__ ) for key, value in shuffled_kwargs.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = [value[i] for i in indices_per_size[len(UpperCamelCase__ )]] return shuffled_kwargs
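For intuition, _distribute_shards hands out num_shards as contiguous ranges over at most max_num_jobs groups, with the first num_shards % max_num_jobs groups taking one extra shard and empty trailing groups dropped. A standalone sketch mirroring that logic:

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        # Earlier groups absorb the remainder, one extra shard each.
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups

assert distribute_shards(5, 2) == [range(0, 3), range(3, 5)]
assert distribute_shards(2, 3) == [range(0, 1), range(1, 2)]  # no empty groups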
350
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]: super().__init__() _snake_case = nn.Sequential( nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , ) _snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.ModuleList() for lyr_num in range(lowerCAmelCase_ ): # FiLM conditional T5 decoder _snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) self.decoders.append(lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case , _snake_case , _snake_case = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _snake_case = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _snake_case = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _snake_case = torch.broadcast_to( torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _snake_case = self.position_encoding(lowerCAmelCase_ ) _snake_case = self.continuous_inputs_projection(lowerCAmelCase_ ) inputs += position_encodings _snake_case = self.dropout(lowerCAmelCase_ ) # decoder: No padding present. _snake_case = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings _snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _snake_case = lyr( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0] _snake_case = self.decoder_norm(lowerCAmelCase_ ) _snake_case = self.post_dropout(lowerCAmelCase_ ) _snake_case = self.spec_out(lowerCAmelCase_ ) return spec_out class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple: super().__init__() _snake_case = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple: _snake_case = self.layer[0]( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) if encoder_hidden_states is not None: _snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _snake_case = self.layer[1]( lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) # Apply Film Conditional Feed Forward layer _snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ ) return (hidden_states,) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: super().__init__() _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str: # pre_self_attention_layer_norm _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ ) # Self-attention block _snake_case = self.attention(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: super().__init__() _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , 
lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict: _snake_case = self.layer_norm(lowerCAmelCase_ ) _snake_case = self.attention( lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return layer_output class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: super().__init__() _snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]: _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.DenseReluDense(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) _snake_case = NewGELUActivation() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any: _snake_case = self.act(self.wi_a(lowerCAmelCase_ ) ) _snake_case = self.wi_a(lowerCAmelCase_ ) _snake_case = hidden_gelu * hidden_linear _snake_case = self.dropout(lowerCAmelCase_ ) _snake_case = self.wo(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str: super().__init__() _snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) ) _snake_case = eps def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ ) _snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _snake_case = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class UpperCamelCase_ ( nn.Module ): def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) )) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = self.scale_bias(lowerCAmelCase_ ) _snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 ) _snake_case = x * (1 + scale) + shift return x
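The FiLM layer at the end projects a conditioning embedding to 2 * d features, splits the result into scale and shift, and modulates the hidden states as x * (1 + scale) + shift. A minimal torch sketch with assumed shapes:

import torch
from torch import nn

d_model, d_cond = 4, 8
film = nn.Linear(d_cond, 2 * d_model, bias=False)  # the scale_bias projection

x = torch.randn(2, 3, d_model)    # (batch, seq, features)
cond = torch.randn(2, 1, d_cond)  # one conditioning vector per sequence

scale, shift = film(cond).chunk(2, dim=-1)
y = x * (1 + scale) + shift       # modulation broadcasts over the sequence axis
assert y.shape == x.shape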
295
0
from __future__ import annotations def lowerCamelCase__ ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool: '''simple docstring''' if len(UpperCamelCase__ ) == 0: return False _snake_case = len(UpperCamelCase__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , UpperCamelCase__ ) else: return binary_search(a_list[midpoint + 1 :] , UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = input("""Enter numbers separated by comma:\n""").strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")] UpperCAmelCase_ = int(input("""Enter the number to be found in the list:\n""").strip()) UpperCAmelCase_ = """""" if binary_search(sequence, target) else """not """ print(F"{target} was {not_str}found in {sequence}")
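A de-mangled sketch of the recursive search above. Because only membership is returned, slicing the list on each call stays correct, at the cost of O(n) copying per level:

def binary_search(a_list: list[int], item: int) -> bool:
    if not a_list:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    return binary_search(a_list[midpoint + 1:], item)

assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False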
351
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''gpt_neo''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple: _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_layers _snake_case = num_heads _snake_case = intermediate_size _snake_case = window_size _snake_case = activation_function _snake_case = resid_dropout _snake_case = embed_dropout _snake_case = attention_dropout _snake_case = classifier_dropout _snake_case = layer_norm_epsilon _snake_case = initializer_range _snake_case = use_cache _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = attention_types _snake_case = self.expand_attention_types_params(lowerCAmelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' import torch _snake_case = input.size() _snake_case = len(UpperCamelCase__ ) _snake_case = shape[dimension] _snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ ) _snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1 _snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None] _snake_case = [slice(UpperCamelCase__ )] * rank _snake_case = indices _snake_case = input[s] _snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str: '''simple docstring''' import torch _snake_case = torch.arange(1 , UpperCamelCase__ ) _snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = remainders == 0 _snake_case = candidates[divisor_indices] _snake_case = torch.max(UpperCamelCase__ ) return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' ) class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) _snake_case = {0: 'batch', 1: 'past_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCAmelCase ( self ) -> int: return self._config.num_heads def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: _snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) # We need to order the input in the way they appears in the forward() _snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers ) ] _snake_case = common_inputs['attention_mask'] if self.use_past: _snake_case = ordered_inputs['attention_mask'].dtype _snake_case = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self ) -> int: return 13
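The expand_attention_types_params helper above unrolls the compact attention_types spec into one entry per layer; the default [[["global", "local"], 12]] repeats the global/local pair 12 times to cover 24 layers. A sketch of that expansion:

def expand_attention_types(attention_types) -> list[str]:
    # Each item is (patterns, repeat_count); extend the flat per-layer list in order.
    attentions = []
    for patterns, count in attention_types:
        for _ in range(count):
            attentions.extend(patterns)
    return attentions

layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24 and layers[:4] == ["global", "local", "global", "local"]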
295
0
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument UpperCAmelCase_ = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> List[str]: '''simple docstring''' _snake_case = list(s_dict.keys() ) for key in keys: _snake_case = R'.*/layers_(\d+)' _snake_case = key if re.match(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCamelCase__ ) _snake_case = R'(encoder|decoder)\/' if re.match(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups() if groups[0] == "encoder": _snake_case = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCamelCase__ ) _snake_case = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCamelCase__ ) elif groups[0] == "decoder": _snake_case = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCamelCase__ ) _snake_case = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCamelCase__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: _snake_case = new_key.replace(UpperCamelCase__ , UpperCamelCase__ ) print(F'''{key} -> {new_key}''' ) _snake_case = s_dict.pop(UpperCamelCase__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _snake_case = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _snake_case = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            _snake_case = s_dict[key].shape[0]
            _snake_case = s_dict[key]
            for idx in range(UpperCamelCase__ ):
                _snake_case = expert_weights[idx]
                print(F'''{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}''' )
            s_dict.pop(UpperCamelCase__ )

    return s_dict


UpperCAmelCase_ = {
    """NUM_ENCODER_LAYERS""": """num_layers""",
    """NUM_DECODER_LAYERS""": """num_decoder_layers""",
    """NUM_HEADS""": """num_heads""",
    """HEAD_DIM""": """d_kv""",
    """EMBED_DIM""": """d_model""",
    """MLP_DIM""": """d_ff""",
    """NUM_SELECTED_EXPERTS""": """num_selected_experts""",
    """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
    """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
    """dense.MlpBlock.activations""": """feed_forward_proj""",
}


def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
    '''simple docstring'''
    import regex as re

    with open(UpperCamelCase__ , 'r' ) as f:
        _snake_case = f.read()

    _snake_case = re.findall(R'(.*) = ([0-9.]*)' , UpperCamelCase__ )
    _snake_case = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            _snake_case = float(UpperCamelCase__ ) if '.' in value else int(UpperCamelCase__ )

    _snake_case = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCamelCase__ )[0]
    _snake_case = str(activation[1] )
    _snake_case = num_experts
    _snake_case = SwitchTransformersConfig(**UpperCamelCase__ )
    return config


def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]="./" , UpperCamelCase__ : Dict=8 ) -> List[str]:
    '''simple docstring'''
    print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    _snake_case = checkpoints.load_tax_checkpoint(UpperCamelCase__ )

    if gin_file is not None:
        _snake_case = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
    else:
        _snake_case = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )

    _snake_case = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
    _snake_case = flax_params['target']
    _snake_case = flatten_dict(UpperCamelCase__ , sep='/' )
    _snake_case = rename_keys(UpperCamelCase__ )
    _snake_case = unflatten_dict(UpperCamelCase__ , sep='/' )

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )

    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    pt_model.save_pretrained(UpperCamelCase__ )


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--switch_t5x_checkpoint_path""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
            """ model architecture. If not provided, a `gin_file` has to be provided."""
        ),
    )
    parser.add_argument(
        """--gin_file""",
        default=None,
        type=str,
        required=False,
        help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
    )
    parser.add_argument(
        """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
    )
    parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    UpperCAmelCase_ = parser.parse_args()

    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
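# Editorial sketch, not part of the original script: how the layer-renaming
# regexes above rewrite a raw T5X-style parameter key (the key string here is
# made up for illustration).
import re

key = "encoder/layers_3/mlp/wi/kernel"
key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
key = re.sub(r"/mlp/", r"/1/mlp/", key)  # the encoder MLP sits at sub-layer 1 (2 in the decoder)
print(key)  # encoder/block/3/layer/1/mlp/wi/kernel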
from cva import destroyAllWindows, imread, imshow, waitKey def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' _snake_case , _snake_case = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): _snake_case = [255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1) # convert to its negative UpperCAmelCase_ = convert_to_negative(img) # show result image imshow("""negative of original image""", img) waitKey(0) destroyAllWindows()
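# Editorial note, not part of the original file: because images load as uint8
# NumPy arrays, the per-pixel loop above collapses to one vectorized
# expression. A self-contained sketch:
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
negative = 255 - img  # broadcasts over every pixel and channel at once
assert (negative.astype(int) + img == 255).all()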
import tensorflow as tf from ...tf_utils import shape_list class UpperCamelCase_ ( tf.keras.layers.Layer ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> Dict: super().__init__(**lowerCAmelCase_ ) _snake_case = vocab_size _snake_case = d_embed _snake_case = d_proj _snake_case = cutoffs + [vocab_size] _snake_case = [0] + self.cutoffs _snake_case = div_val _snake_case = self.cutoffs[0] _snake_case = len(self.cutoffs ) - 1 _snake_case = self.shortlist_size + self.n_clusters _snake_case = keep_order _snake_case = [] _snake_case = [] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: if self.n_clusters > 0: _snake_case = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' ) _snake_case = self.add_weight( shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: _snake_case = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' , ) self.out_projs.append(lowerCAmelCase_ ) else: self.out_projs.append(lowerCAmelCase_ ) _snake_case = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , ) _snake_case = self.add_weight( shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): _snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1] _snake_case = self.d_embed // (self.div_val**i) _snake_case = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' ) self.out_projs.append(lowerCAmelCase_ ) _snake_case = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , ) _snake_case = self.add_weight( shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]: _snake_case = x if proj is not None: _snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ ) return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b @staticmethod def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = shape_list(lowerCAmelCase_ ) _snake_case = tf.range(lp_size[0] , dtype=target.dtype ) _snake_case = tf.stack([r, target] , 1 ) return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ) -> int: _snake_case = 0 if self.n_clusters == 0: _snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: _snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ ) _snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 ) else: _snake_case = shape_list(lowerCAmelCase_ ) _snake_case = [] _snake_case = 
tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): _snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: _snake_case = (target >= l_idx) & (target < r_idx) _snake_case = tf.where(lowerCAmelCase_ ) _snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx if self.div_val == 1: _snake_case = self.out_layers[0][0][l_idx:r_idx] _snake_case = self.out_layers[0][1][l_idx:r_idx] else: _snake_case = self.out_layers[i][0] _snake_case = self.out_layers[i][1] if i == 0: _snake_case = tf.concat([cur_W, self.cluster_weight] , 0 ) _snake_case = tf.concat([cur_b, self.cluster_bias] , 0 ) _snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] ) _snake_case = tf.nn.log_softmax(lowerCAmelCase_ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: _snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ ) else: _snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] ) _snake_case = tf.nn.log_softmax(lowerCAmelCase_ ) _snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster _snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(lowerCAmelCase_ ) if target is not None: _snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) ) _snake_case = tf.concat(lowerCAmelCase_ , axis=-1 ) if target is not None: if return_mean: _snake_case = tf.reduce_mean(lowerCAmelCase_ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(lowerCAmelCase_ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' ) return out
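# Editorial sketch, not part of the original layer: what the `_gather_logprob`
# helper above computes: for each row, the log-probability at that row's
# target index, via tf.gather_nd.
import tensorflow as tf

logprob = tf.math.log(tf.constant([[0.1, 0.9], [0.8, 0.2]]))
target = tf.constant([1, 0])
r = tf.range(tf.shape(logprob)[0], dtype=target.dtype)
print(tf.gather_nd(logprob, tf.stack([r, target], 1)))  # ~[log 0.9, log 0.8]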
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' _snake_case = VideoMAEConfig() set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ ) if "finetuned" not in model_name: _snake_case = False if "finetuned" in model_name: _snake_case = 'huggingface/label-files' if "kinetics" in model_name: _snake_case = 400 _snake_case = 'kinetics400-id2label.json' elif "ssv2" in model_name: _snake_case = 174 _snake_case = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) _snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) _snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int: '''simple docstring''' if "small" in model_name: _snake_case = 384 _snake_case = 1_536 _snake_case = 12 _snake_case = 16 _snake_case = 12 _snake_case = 3 _snake_case = 192 _snake_case = 768 elif "large" in model_name: _snake_case = 1_024 _snake_case = 4_096 _snake_case = 24 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 512 _snake_case = 2_048 elif "huge" in model_name: _snake_case = 1_280 _snake_case = 5_120 _snake_case = 32 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 640 _snake_case = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' if "encoder." in name: _snake_case = name.replace('encoder.' 
, '' ) if "cls_token" in name: _snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: _snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: _snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: _snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: _snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: _snake_case = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: _snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: _snake_case = name.replace('attn' , 'attention.self' ) if "attn" in name: _snake_case = name.replace('attn' , 'attention.attention' ) if "norm1" in name: _snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: _snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: _snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: _snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: _snake_case = name.replace('head' , 'classifier' ) return name def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): _snake_case = orig_state_dict.pop(UpperCamelCase__ ) if key.startswith('encoder.' ): _snake_case = key.replace('encoder.' , '' ) if "qkv" in key: _snake_case = key.split('.' ) if key.startswith('decoder.blocks' ): _snake_case = config.decoder_hidden_size _snake_case = int(key_split[2] ) _snake_case = 'decoder.decoder_layers.' if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = config.hidden_size _snake_case = int(key_split[1] ) _snake_case = 'videomae.encoder.layer.' 
if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = val return orig_state_dict def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _snake_case = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) _snake_case = np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' _snake_case = get_videomae_config(UpperCamelCase__ ) if "finetuned" in model_name: _snake_case = VideoMAEForVideoClassification(UpperCamelCase__ ) else: _snake_case = VideoMAEForPreTraining(UpperCamelCase__ ) # download original checkpoint, hosted on Google Drive _snake_case = 'pytorch_model.bin' gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' ) if "model" in files: _snake_case = files['model'] else: _snake_case = files['module'] _snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # verify model on basic input _snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _snake_case = prepare_video() _snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' ) if "finetuned" not in model_name: _snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) _snake_case = torch.load(UpperCamelCase__ ) _snake_case = model(**UpperCamelCase__ ) _snake_case = outputs.logits _snake_case = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one _snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == 
"videomae-large-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": _snake_case = outputs.loss assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(UpperCamelCase__ , organization='nielsr' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str = " " ) -> list: '''simple docstring''' _snake_case = [] _snake_case = 0 for index, char in enumerate(UpperCamelCase__ ): if char == separator: split_words.append(string[last_index:index] ) _snake_case = index + 1 elif index + 1 == len(UpperCamelCase__ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
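# Editorial sketch, not part of the original file: the same algorithm with its
# parameter names restored (the listing above obfuscates them), runnable
# stand-alone.
def split_example(string: str, separator: str = " ") -> list:
    parts, last_index = [], 0
    for index, char in enumerate(string):
        if char == separator:
            parts.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            parts.append(string[last_index : index + 1])
    return parts

print(split_example("apple#banana#cherry#orange", "#"))
# ['apple', 'banana', 'cherry', 'orange']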
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , 
config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
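# Editorial sketch, not part of the original file: the conv layers above use
# padding = kernel_size // 2, so spatial size is preserved at stride 1 and
# exactly halved at stride 2, as the stem's 7x7/stride-2 convolution shows.
import torch
from torch import nn

stem = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=7 // 2, bias=False)
print(stem(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 64, 112, 112])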
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( UpperCamelCase__ : List[Any] ) -> int: '''simple docstring''' _snake_case = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): _snake_case = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): _snake_case = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _snake_case = key[key.find('patch_embed' ) + len('patch_embed' )] _snake_case = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase__ )-1}''' ) if "norm" in key: _snake_case = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _snake_case = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] _snake_case = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase__ )-1}''' ) if "layer_norm1" in key: _snake_case = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: _snake_case = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 _snake_case = key[key.find('block' ) + len('block' )] _snake_case = key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase__ )-1}''' ) if "attn.q" in key: _snake_case = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: _snake_case = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: _snake_case = key.replace('attn' , 'attention.self' ) if "fc1" in key: _snake_case = key.replace('fc1' , 'dense1' ) if "fc2" in key: _snake_case = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: _snake_case = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: _snake_case = key.replace('linear_fuse.conv' , 'linear_fuse' ) _snake_case = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _snake_case = key[key.find('linear_c' ) + len('linear_c' )] _snake_case = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase__ )-1}''' ) if "bot_conv" in key: _snake_case = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: _snake_case = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: _snake_case = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: _snake_case = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: _snake_case = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: _snake_case = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: _snake_case = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): _snake_case = key.replace('module.last_layer_depth' , 'head.head' ) _snake_case = value return new_state_dict def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _snake_case = 
state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) _snake_case = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict _snake_case = kv_weight[ : config.hidden_sizes[i], : ] _snake_case = kv_bias[: config.hidden_sizes[i]] _snake_case = kv_weight[ config.hidden_sizes[i] :, : ] _snake_case = kv_bias[config.hidden_sizes[i] :] def lowerCamelCase__ ( ) -> int: '''simple docstring''' _snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg' _snake_case = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return image @torch.no_grad() def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=False , UpperCamelCase__ : int=None ) -> Optional[int]: '''simple docstring''' _snake_case = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _snake_case = GLPNImageProcessor() # prepare image _snake_case = prepare_img() _snake_case = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict _snake_case = torch.load(UpperCamelCase__ , map_location=torch.device('cpu' ) ) # rename keys _snake_case = rename_keys(UpperCamelCase__ ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase__ , UpperCamelCase__ ) # create HuggingFace model and load state dict _snake_case = GLPNForDepthEstimation(UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # forward pass _snake_case = model(UpperCamelCase__ ) _snake_case = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _snake_case = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: _snake_case = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) _snake_case = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) UpperCAmelCase_ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
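# Editorial sketch, not part of the original script: the key/value helper
# above splits each fused (2*hidden, hidden) kv matrix into its key half and
# its value half, as in this stand-alone example.
import torch

hidden = 64
kv_weight = torch.randn(2 * hidden, hidden)  # stands in for a fused kv checkpoint tensor
key_w, value_w = kv_weight[:hidden, :], kv_weight[hidden:, :]
assert torch.equal(torch.cat([key_w, value_w]), kv_weight)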
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool: '''simple docstring''' if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool: '''simple docstring''' if curr_ind == len(UpperCamelCase__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCamelCase__ ) ): if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # Insert current vertex into path as next transition _snake_case = next_ver # Validate created path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ): return True # Backtrack _snake_case = -1 return False def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]: '''simple docstring''' _snake_case = [-1] * (len(UpperCamelCase__ ) + 1) # initialize start and end of path with starting index _snake_case = _snake_case = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
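# Editorial usage sketch, not part of the original file; the entry point is
# assumed to be named `hamilton_cycle` (function names are obfuscated above).
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
# hamilton_cycle(graph) -> [0, 1, 2, 4, 3, 0]; starting at vertex 0, the
# backtracking search extends the path to the smallest valid neighbour and
# backtracks whenever no unvisited neighbour can close the cycle.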
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=_lowerCamelCase ): lowerCAmelCase_ = ['''note_seq'''] def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: requires_backends(self , ['note_seq'] ) @classmethod def lowerCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str: requires_backends(cls , ['note_seq'] ) @classmethod def lowerCAmelCase ( cls , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str: requires_backends(cls , ['note_seq'] )
import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]: '''simple docstring''' _snake_case = OmegaConf.load(UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model'] _snake_case = list(state_dict.keys() ) # extract state_dict for VQVAE _snake_case = {} _snake_case = 'first_stage_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] # extract state_dict for UNetLDM _snake_case = {} _snake_case = 'model.diffusion_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] _snake_case = config.model.params.first_stage_config.params _snake_case = config.model.params.unet_config.params _snake_case = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _snake_case = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _snake_case = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) UpperCAmelCase_ = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
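# Editorial sketch, not part of the original script: the prefix loops above
# collect the VQVAE and UNet weights by key prefix (upstream also strips the
# prefix from each key). A self-contained sketch of that pattern:
state_dict = {"first_stage_model.conv.weight": 1, "model.diffusion_model.out.bias": 2}
prefix = "first_stage_model."
vqvae_sd = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}
print(vqvae_sd)  # {'conv.weight': 1}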
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = (DPMSolverSDEScheduler,) lowerCAmelCase_ = 10 def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = { 'num_train_timesteps': 1100, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**lowerCAmelCase_ ) return config def lowerCAmelCase ( self ) -> Optional[Any]: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[int]: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config() _snake_case = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma _snake_case = sample.to(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = output.prev_sample _snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) ) _snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2 assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2 assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3 def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config(prediction_type='v_prediction' ) _snake_case = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps ) _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma _snake_case = sample.to(lowerCAmelCase_ ) for i, t in enumerate(scheduler.timesteps ): _snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = output.prev_sample _snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) ) _snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2 assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2 assert abs(result_mean.item() - 
0.1_66_88_32_60_01_16_72_97 ) < 1E-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2 assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3 def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config() _snake_case = scheduler_class(**lowerCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase_ ) _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter.to(lowerCAmelCase_ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = output.prev_sample _snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) ) _snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2 assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2 assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3 def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config() _snake_case = scheduler_class(**lowerCAmelCase_ , use_karras_sigmas=lowerCAmelCase_ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase_ ) _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter.to(lowerCAmelCase_ ) * scheduler.init_noise_sigma _snake_case = sample.to(lowerCAmelCase_ ) for t in scheduler.timesteps: _snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = output.prev_sample _snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) ) _snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
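# Editorial sketch, not part of the test file: the denoising-loop pattern the
# tests above exercise, with a zero tensor standing in for a trained model
# (requires `torchsde`, as the tests note).
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample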
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase_ : @property def lowerCAmelCase ( self ) -> int: return self.get_dummy_input() @property def lowerCAmelCase ( self ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]: _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) return dummy_input def lowerCAmelCase ( self ) -> Tuple: _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) unet_block.to(lowerCAmelCase_ ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowerCAmelCase ( self ) -> Tuple: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() _snake_case = model(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ ) _snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward()
from math import sqrt def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' _snake_case = 0 for i in range(1 , int(sqrt(UpperCamelCase__ ) + 1 ) ): if n % i == 0 and i != sqrt(UpperCamelCase__ ): total += i + n // i elif i == sqrt(UpperCamelCase__ ): total += i return total - n def lowerCamelCase__ ( UpperCamelCase__ : int = 10_000 ) -> int: '''simple docstring''' _snake_case = sum( i for i in range(1 , UpperCamelCase__ ) if sum_of_divisors(sum_of_divisors(UpperCamelCase__ ) ) == i and sum_of_divisors(UpperCamelCase__ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
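# Editorial check, not part of the original file: 220 and 284 form the classic
# amicable pair (each equals the sum of the other's proper divisors), and the
# known answer for the 10_000 limit (Project Euler #21) is 31626.
def proper_divisor_sum_example(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)

assert proper_divisor_sum_example(220) == 284
assert proper_divisor_sum_example(284) == 220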
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
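For reference, and independent of the imported `prisms_algorithm`, a heap-based Prim's algorithm over the same `{node: [[neighbour, cost], ...]}` adjacency format might look like the sketch below. It is illustrative only, not the library code under test, and assumes a connected, non-empty graph.

import heapq


def prim_mst_edges(adjacency: dict) -> set:
    """Return the MST edges of a connected graph as (u, v) tuples."""
    start = next(iter(adjacency))
    visited = {start}
    heap = [(cost, start, nbr) for nbr, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = set()
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # stale entry: v was reached via a cheaper edge
        visited.add(v)
        mst_edges.add((u, v))
        for nbr, c in adjacency[v]:
            if nbr not in visited:
                heapq.heappush(heap, (c, v, nbr))
    return mst_edges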
295
0
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = 42 class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): lowerCAmelCase_ = True @register_to_config def __init__( self , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = ("DownEncoderBlock2D",) , lowerCAmelCase_ = ("UpDecoderBlock2D",) , lowerCAmelCase_ = (64,) , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "silu" , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 0.1_82_15 , ) -> Any: super().__init__() # pass init params to Encoder _snake_case = Encoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , ) # pass init params to Decoder _snake_case = Decoder( in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , ) _snake_case = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 ) _snake_case = False _snake_case = False # only relevant if vae tiling is enabled _snake_case = self.config.sample_size _snake_case = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) _snake_case = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) _snake_case = 0.25 def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[int]: if isinstance(lowerCAmelCase_ , (Encoder, Decoder) ): _snake_case = value def lowerCAmelCase ( self , lowerCAmelCase_ = True ) -> List[Any]: _snake_case = use_tiling def lowerCAmelCase ( self ) -> Optional[Any]: self.enable_tiling(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = True def lowerCAmelCase ( self ) -> Any: _snake_case = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCAmelCase ( self ) -> Dict[str, AttentionProcessor]: _snake_case = {} def fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): if hasattr(lowerCAmelCase_ , 'set_processor' ): _snake_case = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'''{name}.{sub_name}''' , lowerCAmelCase_ , lowerCAmelCase_ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return processors def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _snake_case = len(self.attn_processors.keys() ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != count: raise ValueError( F'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase_ )} does not match the''' F''' number of attention layers: {count}. 
Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): if hasattr(lowerCAmelCase_ , 'set_processor' ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): module.set_processor(lowerCAmelCase_ ) else: module.set_processor(processor.pop(F'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'''{name}.{sub_name}''' , lowerCAmelCase_ , lowerCAmelCase_ ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[int]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) if self.use_slicing and x.shape[0] > 1: _snake_case = [self.encoder(lowerCAmelCase_ ) for x_slice in x.split(1 )] _snake_case = torch.cat(lowerCAmelCase_ ) else: _snake_case = self.encoder(lowerCAmelCase_ ) _snake_case = self.quant_conv(lowerCAmelCase_ ) _snake_case = DiagonalGaussianDistribution(lowerCAmelCase_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = self.post_quant_conv(lowerCAmelCase_ ) _snake_case = self.decoder(lowerCAmelCase_ ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) @apply_forward_hook def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: _snake_case = [self._decode(lowerCAmelCase_ ).sample for z_slice in z.split(1 )] _snake_case = torch.cat(lowerCAmelCase_ ) else: _snake_case = self._decode(lowerCAmelCase_ ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = min(a.shape[2] , b.shape[2] , lowerCAmelCase_ ) for y in range(lowerCAmelCase_ ): _snake_case = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = min(a.shape[3] , b.shape[3] , lowerCAmelCase_ ) for x in range(lowerCAmelCase_ ): _snake_case = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> AutoencoderKLOutput: _snake_case = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) _snake_case = int(self.tile_latent_min_size * self.tile_overlap_factor ) _snake_case = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
_snake_case = [] for i in range(0 , x.shape[2] , lowerCAmelCase_ ): _snake_case = [] for j in range(0 , x.shape[3] , lowerCAmelCase_ ): _snake_case = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] _snake_case = self.encoder(lowerCAmelCase_ ) _snake_case = self.quant_conv(lowerCAmelCase_ ) row.append(lowerCAmelCase_ ) rows.append(lowerCAmelCase_ ) _snake_case = [] for i, row in enumerate(lowerCAmelCase_ ): _snake_case = [] for j, tile in enumerate(lowerCAmelCase_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _snake_case = self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_ ) if j > 0: _snake_case = self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCAmelCase_ , dim=3 ) ) _snake_case = torch.cat(lowerCAmelCase_ , dim=2 ) _snake_case = DiagonalGaussianDistribution(lowerCAmelCase_ ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]: _snake_case = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) _snake_case = int(self.tile_sample_min_size * self.tile_overlap_factor ) _snake_case = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. _snake_case = [] for i in range(0 , z.shape[2] , lowerCAmelCase_ ): _snake_case = [] for j in range(0 , z.shape[3] , lowerCAmelCase_ ): _snake_case = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] _snake_case = self.post_quant_conv(lowerCAmelCase_ ) _snake_case = self.decoder(lowerCAmelCase_ ) row.append(lowerCAmelCase_ ) rows.append(lowerCAmelCase_ ) _snake_case = [] for i, row in enumerate(lowerCAmelCase_ ): _snake_case = [] for j, tile in enumerate(lowerCAmelCase_ ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _snake_case = self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_ ) if j > 0: _snake_case = self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_ ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCAmelCase_ , dim=3 ) ) _snake_case = torch.cat(lowerCAmelCase_ , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]: _snake_case = sample _snake_case = self.encode(lowerCAmelCase_ ).latent_dist if sample_posterior: _snake_case = posterior.sample(generator=lowerCAmelCase_ ) else: _snake_case = posterior.mode() _snake_case = self.decode(lowerCAmelCase_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase_ )
359
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum contiguous-subarray sum of arr (Kadane's algorithm)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
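Worked example: on the sample array above, the running `curr_sum` peaks at 6 for the subarray [4, -1, 2, 1]; an all-negative input shows what `allow_empty_subarrays` changes.

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-5, -2]) == -2                             # best non-empty subarray
assert max_subarray_sum([-5, -2], allow_empty_subarrays=True) == 0  # empty subarray wins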
295
0
from typing import Any class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case = data _snake_case = None def __repr__( self ) -> str: return F'''Node({self.data})''' class UpperCamelCase_ : def __init__( self ) -> Union[str, Any]: _snake_case = None def __iter__( self ) -> Any: _snake_case = self.head while node: yield node.data _snake_case = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(lowerCAmelCase_ ) for item in self] ) def __getitem__( self , lowerCAmelCase_ ) -> Any: if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) _snake_case = self.head for _ in range(lowerCAmelCase_ ): _snake_case = current.next _snake_case = data def lowerCAmelCase ( self , lowerCAmelCase_ ) -> None: self.insert_nth(len(self ) , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> None: self.insert_nth(0 , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) _snake_case = Node(lowerCAmelCase_ ) if self.head is None: _snake_case = new_node elif index == 0: _snake_case = self.head # link new_node to head _snake_case = new_node else: _snake_case = self.head for _ in range(index - 1 ): _snake_case = temp.next _snake_case = temp.next _snake_case = new_node def lowerCAmelCase ( self ) -> None: # print every node data print(self ) def lowerCAmelCase ( self ) -> Any: return self.delete_nth(0 ) def lowerCAmelCase ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def lowerCAmelCase ( self , lowerCAmelCase_ = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) _snake_case = self.head # default first node if index == 0: _snake_case = self.head.next else: _snake_case = self.head for _ in range(index - 1 ): _snake_case = temp.next _snake_case = temp.next _snake_case = temp.next.next return delete_node.data def lowerCAmelCase ( self ) -> bool: return self.head is None def lowerCAmelCase ( self ) -> None: _snake_case = None _snake_case = self.head while current: # Store the current node's next node. _snake_case = current.next # Make the current node's next point backwards _snake_case = prev # Make the previous node be the current node _snake_case = current # Make the current node the next node (to progress iteration) _snake_case = next_node # Return prev in order to put the head at the end _snake_case = prev def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = LinkedList() assert linked_list.is_empty() is True assert str(UpperCamelCase__ ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(UpperCamelCase__ ) == i linked_list.insert_nth(UpperCamelCase__ , i + 1 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(UpperCamelCase__ ) == 9 assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): _snake_case = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(-8 , 1 ) ) def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = [ -9, 100, Node(77_345_112 ), 'dlrow olleH', 7, 5_555, 0, -192.5_5555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] _snake_case = LinkedList() for i in test_input: linked_list.insert_tail(UpperCamelCase__ ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(UpperCamelCase__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head _snake_case = linked_list.delete_head() assert result == -9 assert ( str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail _snake_case = linked_list.delete_tail() assert result == 12.2 assert ( str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list _snake_case = linked_list.delete_nth(10 ) assert result is None assert ( str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' 
) ) assert ( str(UpperCamelCase__ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(UpperCamelCase__ ) assert ( str(UpperCamelCase__ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(UpperCamelCase__ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowerCamelCase__ ( ) -> List[Any]: '''simple docstring''' from doctest import testmod testmod() _snake_case = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(UpperCamelCase__ ) print('\nReading/changing Node data using indexing:' ) print(F'''Element at Position 1: {linked_list[1]}''' ) _snake_case = input('Enter New Value: ' ).strip() print('New list:' ) print(UpperCamelCase__ ) print(F'''length of linked_list is : {len(UpperCamelCase__ )}''' ) if __name__ == "__main__": main()
360
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCamelCase_ ( enum.Enum ): lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2 @add_end_docstrings(_lowerCamelCase ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _snake_case = None if self.model.config.prefix is not None: _snake_case = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _snake_case = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params ) _snake_case = {**self._preprocess_params, **preprocess_params} _snake_case = {**self._forward_params, **forward_params} def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple: _snake_case = {} if prefix is not None: _snake_case = prefix if prefix: _snake_case = self.tokenizer( lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) _snake_case = handle_long_generation preprocess_params.update(lowerCAmelCase_ ) _snake_case = generate_kwargs _snake_case = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.TENSORS if return_type is not None: _snake_case = return_type if clean_up_tokenization_spaces is not None: _snake_case = clean_up_tokenization_spaces if stop_sequence is not None: _snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) _snake_case = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any: _snake_case = self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prompt_text if handle_long_generation == "hole": _snake_case = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _snake_case = generate_kwargs['max_new_tokens'] else: _snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _snake_case = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) _snake_case = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _snake_case = inputs['attention_mask'][:, -keep_length:] return inputs def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: _snake_case = model_inputs['input_ids'] _snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ ) # Allow empty prompts if input_ids.shape[1] == 0: _snake_case = None _snake_case = None _snake_case = 1 else: _snake_case = input_ids.shape[0] _snake_case = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_snake_case = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: _snake_case = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _snake_case = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = generated_sequence.shape[0] if self.framework == "pt": _snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int: _snake_case = model_outputs['generated_sequence'][0] _snake_case = model_outputs['input_ids'] _snake_case = model_outputs['prompt_text'] _snake_case = generated_sequence.numpy().tolist() _snake_case = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _snake_case = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _snake_case = self.tokenizer.decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _snake_case = 0 else: _snake_case = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) ) if return_type == ReturnType.FULL_TEXT: _snake_case = prompt_text + text[prompt_length:] else: _snake_case = text[prompt_length:] _snake_case = {'generated_text': all_text} records.append(lowerCAmelCase_ ) return records
295
0
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
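Called directly from Python rather than through argparse, the conversion reduces to a single function call. Every path below is a placeholder for illustration, not a real checkpoint.

# Hypothetical invocation -- all paths are placeholders.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./bigbird_tf_checkpoint",
    big_bird_config_file="./big_bird_config.json",
    pytorch_dump_path="./bigbird_pytorch",
    is_trivia_qa=False,
)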
361
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
295
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass UpperCAmelCase_ = (3, 9, -11, 0, 7, 5, 1, -1) UpperCAmelCase_ = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class UpperCamelCase_ : lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ ) -> None: _snake_case = None for i in sorted(lowerCAmelCase_ , reverse=lowerCAmelCase_ ): _snake_case = Node(lowerCAmelCase_ , self.head ) def __iter__( self ) -> Iterator[int]: _snake_case = self.head while node: yield node.data _snake_case = node.next_node def __len__( self ) -> int: return sum(1 for _ in self ) def __str__( self ) -> str: return " -> ".join([str(lowerCAmelCase_ ) for node in self] ) def lowerCamelCase__ ( UpperCamelCase__ : SortedLinkedList , UpperCamelCase__ : SortedLinkedList ) -> SortedLinkedList: '''simple docstring''' return SortedLinkedList(list(UpperCamelCase__ ) + list(UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
362
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
295
0
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' ) _snake_case = parser.add_subparsers(help='transformers-cli command helpers' ) # Register commands ConvertCommand.register_subcommand(UpperCamelCase__ ) DownloadCommand.register_subcommand(UpperCamelCase__ ) EnvironmentCommand.register_subcommand(UpperCamelCase__ ) RunCommand.register_subcommand(UpperCamelCase__ ) ServeCommand.register_subcommand(UpperCamelCase__ ) UserCommands.register_subcommand(UpperCamelCase__ ) AddNewModelCommand.register_subcommand(UpperCamelCase__ ) AddNewModelLikeCommand.register_subcommand(UpperCamelCase__ ) LfsCommands.register_subcommand(UpperCamelCase__ ) PTtoTFCommand.register_subcommand(UpperCamelCase__ ) # Let's go _snake_case = parser.parse_args() if not hasattr(UpperCamelCase__ , 'func' ): parser.print_help() exit(1 ) # Run _snake_case = args.func(UpperCamelCase__ ) service.run() if __name__ == "__main__": main()
363
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase_ = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: _snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) else: _snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) _snake_case = ['key_proj', 'value_proj', 'query_proj'] _snake_case = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: _snake_case = key.split('.' ) if attributes[0] == "lm_head": _snake_case = prophet _snake_case = prophet_old else: _snake_case = prophet.prophetnet _snake_case = prophet_old.model _snake_case = False for attribute in attributes: if attribute in mapping: _snake_case = mapping[attribute] if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0: _snake_case = attribute elif hasattr(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _snake_case = old_model.weight logger.info(F'''{attribute} is initialized.''' ) _snake_case = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_snake_case = old_model.bias logger.info(F'''{attribute} is initialized''' ) _snake_case = True break elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ): _snake_case = old_model.in_proj_weight.shape[0] // 3 _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _snake_case = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." _snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] ) _snake_case = True break if attribute.isdigit(): _snake_case = model[int(UpperCamelCase__ )] _snake_case = old_model[int(UpperCamelCase__ )] else: _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if old_attribute == "": _snake_case = old_model else: if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
295
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ = { """configuration_clip""": [ """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPConfig""", """CLIPOnnxConfig""", """CLIPTextConfig""", """CLIPVisionConfig""", ], """processing_clip""": ["""CLIPProcessor"""], """tokenization_clip""": ["""CLIPTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""CLIPTokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""CLIPFeatureExtractor"""] UpperCAmelCase_ = ["""CLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPModel""", """CLIPPreTrainedModel""", """CLIPTextModel""", """CLIPTextModelWithProjection""", """CLIPVisionModel""", """CLIPVisionModelWithProjection""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCLIPModel""", """TFCLIPPreTrainedModel""", """TFCLIPTextModel""", """TFCLIPVisionModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """FlaxCLIPModel""", """FlaxCLIPPreTrainedModel""", """FlaxCLIPTextModel""", """FlaxCLIPTextPreTrainedModel""", """FlaxCLIPVisionModel""", """FlaxCLIPVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
364
import random def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict: '''simple docstring''' _snake_case = {i: [] for i in range(UpperCamelCase__ )} # if probability is greater than or equal to 1, then generate a complete graph if probability >= 1: return complete_graph(UpperCamelCase__ ) # if probability is lower than or equal to 0, then return a graph without edges if probability <= 0: return graph # for each pair of nodes, add an edge from u to v # if the number randomly generated is lower than probability for i in range(UpperCamelCase__ ): for j in range(i + 1 , UpperCamelCase__ ): if random.random() < probability: graph[i].append(UpperCamelCase__ ) if not directed: # if the graph is undirected, add an edge from j to i as well graph[j].append(UpperCamelCase__ ) return graph def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict: '''simple docstring''' return { i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ ) } if __name__ == "__main__": import doctest doctest.testmod()
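For readability, a hand-restored sketch of the generator above: a G(n, p) random graph returned as an adjacency-list dict. The restored names (random_graph, nodes, directed) are assumptions; the original's early returns for p >= 1 and p <= 0 are just fast paths for the same behavior:

import random

def random_graph(nodes: int, probability: float, directed: bool = False) -> dict:
    graph: dict[int, list[int]] = {i: [] for i in range(nodes)}
    for i in range(nodes):
        for j in range(i + 1, nodes):
            # each pair (i, j) gets an edge with the given probability
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)  # undirected: mirror the edge
    return graph

print(random_graph(5, 0.5))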
295
0
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> str: '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = F'''Expected string as input, found {type(UpperCamelCase__ )}''' raise ValueError(UpperCamelCase__ ) if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = F'''Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}''' raise ValueError(UpperCamelCase__ ) _snake_case = input_str.split('_' ) _snake_case = 0 if use_pascal else 1 _snake_case = words[start_index:] _snake_case = [word[0].upper() + word[1:] for word in words_to_capitalize] _snake_case = '' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
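A hand-restored sketch of the converter above; the name snake_to_camel_case and the assertion-style checks are assumptions:

def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    # PascalCase capitalizes every word; camelCase leaves the first one as-is
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"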
365
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = encoder_stride _snake_case = num_attention_outputs _snake_case = embed_dim _snake_case = embed_dim + 1 _snake_case = resolution _snake_case = depths _snake_case = hidden_sizes _snake_case = dim _snake_case = mlp_expansion_ratio def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> Tuple: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _snake_case = self.type_sequence_label_size _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Optional[Any]: pass def lowerCAmelCase ( self ) -> str: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): _snake_case = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: _snake_case = seq_length * self.model_tester.chunk_length else: _snake_case = self.model_tester.seq_length 
self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _snake_case = outputs.decoder_hidden_states self.assertIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): _snake_case = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else
outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self ) -> Dict: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _snake_case = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _snake_case = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _snake_case = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Dict: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
295
0
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def lowerCamelCase__ ( UpperCamelCase__ : int ) -> Optional[int]: '''simple docstring''' return getitem, k def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> Dict: '''simple docstring''' return setitem, k, v def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' return delitem, k def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , *UpperCamelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' try: return fun(UpperCamelCase__ , *UpperCamelCase__ ), None except Exception as e: return None, e UpperCAmelCase_ = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) UpperCAmelCase_ = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] UpperCAmelCase_ = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] UpperCAmelCase_ = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] UpperCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] UpperCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( 'operations' , ( pytest.param(_add_items , id='add items' ), pytest.param(_overwrite_items , id='overwrite items' ), pytest.param(_delete_items , id='delete items' ), pytest.param(_access_absent_items , id='access absent items' ), pytest.param(_add_with_resize_up , id='add with resize up' ), pytest.param(_add_with_resize_down , id='add with resize down' ), ) , ) def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> int: '''simple docstring''' _snake_case = HashMap(initial_block_size=4 ) _snake_case = {} for _, (fun, *args) in enumerate(UpperCamelCase__ ): _snake_case , _snake_case = _run_operation(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) _snake_case , _snake_case = _run_operation(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) assert my_res == py_res assert str(UpperCamelCase__ ) == str(UpperCamelCase__ ) assert set(UpperCamelCase__ ) == set(UpperCamelCase__ ) assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) assert set(my.items() ) == set(py.items() ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' def is_public(UpperCamelCase__ : str ) -> bool: return not name.startswith('_' ) _snake_case = {name for name in dir({} ) if is_public(UpperCamelCase__ )} _snake_case = {name for name in dir(HashMap() ) if is_public(UpperCamelCase__ )} assert dict_public_names > hash_public_names
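The test above is differential testing: the same (function, *args) operation tuples are replayed against the custom HashMap and a plain dict, and both the results and any raised exceptions are compared. A minimal sketch of the core loop, here using two dicts as stand-ins for HashMap() versus dict():

from operator import getitem, setitem

def run_operation(target, fun, *args):
    # returns (result, exception) so failures can be compared too
    try:
        return fun(target, *args), None
    except Exception as exc:
        return None, exc

my, py = {}, {}  # stand-ins; the real test pits HashMap() against dict()
operations = [(setitem, "k", "v"), (getitem, "k"), (getitem, "missing")]
for fun, *args in operations:
    my_res, my_exc = run_operation(my, fun, *args)
    py_res, py_exc = run_operation(py, fun, *args)
    assert my_res == py_res and type(my_exc) == type(py_exc)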
366
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> List[str]: super().setUp() _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '<unk>'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def lowerCAmelCase ( self ) -> Union[str, Any]: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertNotIn('labels' , lowerCAmelCase_ ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , 
return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.'] _snake_case = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' ) _snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' ) _snake_case = inputs['input_ids'] _snake_case = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = ['Summary of the text.', 'Another summary.'] _snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']] _snake_case = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Tuple: pass def lowerCAmelCase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = 'A, <mask> AllenNLP sentence.' _snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
295
0
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(UpperCamelCase__ , x % y ) def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int = 20 ) -> int: '''simple docstring''' _snake_case = 1 for i in range(1 , n + 1 ): _snake_case = lcm(UpperCamelCase__ , UpperCamelCase__ ) return g if __name__ == "__main__": print(F"{solution() = }")
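This file is Project Euler problem 5: the smallest number evenly divisible by every integer from 1 to n is the running LCM of 1..n. A hand-restored sketch:

def gcd(x: int, y: int) -> int:
    # Euclid's algorithm
    return x if y == 0 else gcd(y, x % y)

def lcm(x: int, y: int) -> int:
    return (x * y) // gcd(x, y)

def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g

print(solution())  # 232792560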
367
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = BertTokenizer lowerCAmelCase_ = BertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english def lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _snake_case = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = 'UNwant\u00E9d,running' _snake_case = 'unwanted, running' return input_text, output_text def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.tokenizer_class(self.vocab_file ) _snake_case = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def lowerCAmelCase ( self ) -> Tuple: if not self.test_rust_tokenizer: return _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing _snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase ( self ) -> Any: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Dict: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer() _snake_case = 'a\n\'ll !!to?\'d of, can\'t.' 
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _snake_case = {} for i, token in enumerate(lowerCAmelCase_ ): _snake_case = i _snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase ( self ) -> Tuple: self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase ( self ) -> Dict: self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase ( self ) -> int: self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' ) _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def lowerCAmelCase ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _snake_case = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) _snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False _snake_case = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), 
tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase ( self ) -> str: _snake_case = ['的', '人', '有'] _snake_case = ''.join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = True _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". _snake_case = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
295
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''gpt_neo''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple: _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_layers _snake_case = num_heads _snake_case = intermediate_size _snake_case = window_size _snake_case = activation_function _snake_case = resid_dropout _snake_case = embed_dropout _snake_case = attention_dropout _snake_case = classifier_dropout _snake_case = layer_norm_epsilon _snake_case = initializer_range _snake_case = use_cache _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = attention_types _snake_case = self.expand_attention_types_params(lowerCAmelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' import torch _snake_case = input.size() _snake_case = len(UpperCamelCase__ ) _snake_case = shape[dimension] _snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ ) _snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1 _snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None] _snake_case = [slice(UpperCamelCase__ )] * rank _snake_case = indices _snake_case = input[s] _snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str: '''simple docstring''' import torch _snake_case = torch.arange(1 , UpperCamelCase__ ) _snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = remainders == 0 _snake_case = candidates[divisor_indices] _snake_case = torch.max(UpperCamelCase__ ) return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' ) class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) _snake_case = {0: 'batch', 1: 'past_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCAmelCase ( self ) -> int: return self._config.num_heads def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: _snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) # We need to order the input in the way they appears in the forward() _snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers ) ] _snake_case = common_inputs['attention_mask'] if self.use_past: _snake_case = ordered_inputs['attention_mask'].dtype _snake_case = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self ) -> int: return 13
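GPT-Neo's attention_types field is run-length encoded, so [[["global", "local"], 12]] expands to 24 alternating layer types, matching num_layers. A small sketch of the expansion performed by the static method above:

def expand_attention_types(attention_types):
    # [[["global", "local"], 12]] -> ["global", "local", "global", ...] (24 items)
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions

layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24 and layers[:2] == ["global", "local"]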
368
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None: warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
295
0
from __future__ import annotations from typing import Any class UpperCamelCase_ ( _lowerCamelCase ): """simple docstring""" pass class UpperCamelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> None: _snake_case = data _snake_case = None def __iter__( self ) -> Optional[int]: _snake_case = self _snake_case = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCAmelCase_ ) yield node.data _snake_case = node.next_node @property def lowerCAmelCase ( self ) -> bool: try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": UpperCAmelCase_ = Node(1) UpperCAmelCase_ = Node(2) UpperCAmelCase_ = Node(3) UpperCAmelCase_ = Node(4) print(root_node.has_loop) # False UpperCAmelCase_ = root_node.next_node print(root_node.has_loop) # True UpperCAmelCase_ = Node(5) UpperCAmelCase_ = Node(6) UpperCAmelCase_ = Node(5) UpperCAmelCase_ = Node(6) print(root_node.has_loop) # False UpperCAmelCase_ = Node(1) print(root_node.has_loop) # False
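The has_loop property above keeps a visited list, which costs O(n) extra space and, because each membership check scans the list, O(n^2) time. Floyd's tortoise-and-hare finds the same answer in O(n) time and O(1) space; a sketch against the same next_node attribute:

def has_loop(head) -> bool:
    # slow advances one node per step, fast advances two;
    # the pointers meet if and only if the list contains a cycle
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False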
369
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: with TemporaryDirectory() as tmp_dir: _snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) _snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ ) _snake_case = builder_cls( cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , ) _snake_case = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) _snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) self.assertTrue(os.path.exists(lowerCAmelCase_ ) ) @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' _snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' _snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _snake_case = None builder_instance.download_and_prepare() _snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _snake_case = dataset_module_factory('wikipedia' , 
cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) _snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert "train" in ds assert isinstance(ds['train'] , UpperCamelCase__ ) assert next(iter(ds['train'] ) )
295
0
def lowerCamelCase__ ( ) -> int: '''simple docstring''' return 1 def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int = 200 ) -> int: '''simple docstring''' return two_pound(UpperCamelCase__ ) if __name__ == "__main__": print(solution(int(input().strip())))
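This file is Project Euler problem 31: each function counts the ways to reach an amount using coins up to its own denomination, so the top-level call counts the coin combinations summing to 200 pence. An equivalent iterative sketch over an explicit denomination list:

def count_combinations(target: int, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    # classic unbounded counting DP: ways[v] = number of combinations summing to v
    ways = [1] + [0] * target
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]

assert count_combinations(200) == 73682  # known answer for Euler #31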
370
def lowerCamelCase__ ( ) -> int: '''simple docstring''' return 1 def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pence(x - 2 ) + one_pence() def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int: '''simple docstring''' return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : int = 200 ) -> int: '''simple docstring''' return two_pound(UpperCamelCase__ ) if __name__ == "__main__": print(solution(int(input().strip())))
295
0
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class UpperCamelCase_ ( unittest.TestCase ): def lowerCAmelCase ( self ) -> Dict: _snake_case = logging.get_logger() # the current default level is logging.WARNING _snake_case = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = logging.get_verbosity() _snake_case = logging.get_logger('transformers.models.bart.tokenization_bart' ) _snake_case = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase_ ) as cl: logger.warning(lowerCAmelCase_ ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase_ ) as cl: logger.warning(lowerCAmelCase_ ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase_ ) as cl: logger.warning(lowerCAmelCase_ ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase_ ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def lowerCAmelCase ( self ) -> List[str]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _snake_case = logging.get_logger('transformers.models.bart.tokenization_bart' ) _snake_case = os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase_ ) _snake_case = logging.log_levels[env_level_str] _snake_case = logging.get_verbosity() self.assertEqual( lowerCAmelCase_ , lowerCAmelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , ) # restore to the original level _snake_case = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def lowerCAmelCase ( self ) -> Union[str, Any]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _snake_case = logging.logging.getLogger() with CaptureLogger(lowerCAmelCase_ ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def lowerCAmelCase ( self ) -> List[str]: # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _snake_case = logging.get_logger('transformers.models.bart.tokenization_bart' ) _snake_case = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): 
# nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase_ ) as cl: logger.warning_advice(lowerCAmelCase_ ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase_ ) as cl: logger.warning_advice(lowerCAmelCase_ ) self.assertEqual(cl.out , msg + '\n' ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
371
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple: '''simple docstring''' _snake_case = [0 for i in range(r + 1 )] # nc0 = 1 _snake_case = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _snake_case = min(UpperCamelCase__ , UpperCamelCase__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
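The routine above computes nCr by building a single row of Pascal's triangle in place, walking j downward so each update still reads the previous row's value. A hand-restored sketch:

def binomial_coefficient(n: int, r: int) -> int:
    c = [0] * (r + 1)
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # update from the right so c[j - 1] still holds the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))  # 252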
295
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> int: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = scope _snake_case = self.vocab_size - 1 def lowerCAmelCase ( self ) -> int: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_token_type_ids: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = ids_tensor([self.batch_size] , self.num_choices ) _snake_case = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> Optional[Any]: _snake_case = OpenAIGPTModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> Dict: _snake_case = OpenAIGPTLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , 
labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> List[str]: _snake_case = OpenAIGPTDoubleHeadsModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) -> Dict: _snake_case = self.num_labels _snake_case = OpenAIGPTForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowerCAmelCase_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowerCAmelCase_ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Union[str, Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ , ) _snake_case = inputs_dict['labels'] _snake_case = inputs_dict['labels'] _snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase_ , ) _snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowerCAmelCase ( self ) -> List[str]: _snake_case = OpenAIGPTModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , n_embd=37 ) def lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> Dict: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = OpenAIGPTModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase_ ) _snake_case = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president is _snake_case = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _snake_case = model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase_ )
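A note on the `_prepare_for_class` override above: for `OpenAIGPTDoubleHeadsModel` it swaps in per-choice language-modeling labels plus one multiple-choice label per example. A minimal sketch of those shapes, using the tester defaults above rather than a real checkpoint:

# Sketch of the label tensors built in `_prepare_for_class` for
# OpenAIGPTDoubleHeadsModel; sizes mirror the tester defaults above.
import torch

batch_size, num_choices, seq_length = 13, 4, 7

lm_labels = torch.zeros(batch_size, num_choices, seq_length, dtype=torch.long)
mc_labels = torch.zeros(batch_size, dtype=torch.long)

print(lm_labels.shape)  # torch.Size([13, 4, 7]) -- one LM target per token per choice
print(mc_labels.shape)  # torch.Size([13])       -- index of the correct choice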
350
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]: super().__init__() _snake_case = nn.Sequential( nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , ) _snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.ModuleList() for lyr_num in range(lowerCAmelCase_ ): # FiLM conditional T5 decoder _snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) self.decoders.append(lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case , _snake_case , _snake_case = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _snake_case = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _snake_case = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _snake_case = torch.broadcast_to( torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _snake_case = self.position_encoding(lowerCAmelCase_ ) _snake_case = self.continuous_inputs_projection(lowerCAmelCase_ ) inputs += position_encodings _snake_case = self.dropout(lowerCAmelCase_ ) # decoder: No padding present. _snake_case = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings _snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _snake_case = lyr( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0] _snake_case = self.decoder_norm(lowerCAmelCase_ ) _snake_case = self.post_dropout(lowerCAmelCase_ ) _snake_case = self.spec_out(lowerCAmelCase_ ) return spec_out class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple: super().__init__() _snake_case = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple: _snake_case = self.layer[0]( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) if encoder_hidden_states is not None: _snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _snake_case = self.layer[1]( lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) # Apply Film Conditional Feed Forward layer _snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ ) return (hidden_states,) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: super().__init__() _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str: # pre_self_attention_layer_norm _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ ) # Self-attention block _snake_case = self.attention(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: super().__init__() _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , 
lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict: _snake_case = self.layer_norm(lowerCAmelCase_ ) _snake_case = self.attention( lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return layer_output class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: super().__init__() _snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]: _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.DenseReluDense(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) _snake_case = NewGELUActivation() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any: _snake_case = self.act(self.wi_a(lowerCAmelCase_ ) ) _snake_case = self.wi_a(lowerCAmelCase_ ) _snake_case = hidden_gelu * hidden_linear _snake_case = self.dropout(lowerCAmelCase_ ) _snake_case = self.wo(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str: super().__init__() _snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) ) _snake_case = eps def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ ) _snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _snake_case = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class UpperCamelCase_ ( nn.Module ): def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) )) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = self.scale_bias(lowerCAmelCase_ ) _snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 ) _snake_case = x * (1 + scale) + shift return x
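Two small numerical primitives recur in the decoder above: the T5-style RMS layer norm (scale only, computed without mean subtraction) and the FiLM modulation x * (1 + scale) + shift. A self-contained sketch of both, assuming nothing beyond plain torch and arbitrary sizes:

# Standalone sketch of the two conditioning primitives used above:
# T5-style RMS layer norm (scale only, no mean/bias) and FiLM modulation.
import torch

x = torch.randn(2, 8)

# RMS norm: divide by the root mean square of the features (eps for stability)
eps = 1e-6
rms = x.pow(2).mean(-1, keepdim=True)
x_normed = x * torch.rsqrt(rms + eps)

# FiLM: a conditioning vector predicts a per-feature (scale, shift) pair
scale, shift = torch.randn(2, 8), torch.randn(2, 8)
x_filmed = x_normed * (1 + scale) + shift

print(x_normed.shape, x_filmed.shape)  # torch.Size([2, 8]) torch.Size([2, 8])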
295
0
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Rightmost index of `char` in the pattern, or -1 if it does not occur.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Compare the pattern right-to-left against the text window starting
        # at `current_pos`; return the text index of the first mismatch, or
        # -1 if the whole window matches.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # Searches pattern in text and returns index positions of every match.
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Intended bad-character shift. Note that rebinding the loop
                # variable does not skip iterations in a Python `for` loop, so
                # every alignment is still checked: the search stays correct,
                # just without the speed-up the shift was meant to provide.
                i = mismatch_index - match_index


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)  # [0, 3] for this text/pattern pair
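A quick worked example of the bad-character rule implemented above (illustrative values; it reuses the class defined in this snippet):

# Aligning "AB" at position 1 of "ABAABA": the right-to-left scan mismatches
# at text index 2 ('A' vs the pattern's 'B'); the rightmost 'A' in the
# pattern is at index 0, so the intended next alignment is 2 - 0 = 2.
bms = BoyerMooreSearch("ABAABA", "AB")
mismatch_index = bms.mismatch_in_text(1)                      # 2
match_index = bms.match_in_pattern("ABAABA"[mismatch_index])  # 0
print(mismatch_index - match_index)  # 2: next window to try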
351
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''gpt_neo''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple: _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_layers _snake_case = num_heads _snake_case = intermediate_size _snake_case = window_size _snake_case = activation_function _snake_case = resid_dropout _snake_case = embed_dropout _snake_case = attention_dropout _snake_case = classifier_dropout _snake_case = layer_norm_epsilon _snake_case = initializer_range _snake_case = use_cache _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = attention_types _snake_case = self.expand_attention_types_params(lowerCAmelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' import torch _snake_case = input.size() _snake_case = len(UpperCamelCase__ ) _snake_case = shape[dimension] _snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ ) _snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1 _snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None] _snake_case = [slice(UpperCamelCase__ )] * rank _snake_case = indices _snake_case = input[s] _snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str: '''simple docstring''' import torch _snake_case = torch.arange(1 , UpperCamelCase__ ) _snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = remainders == 0 _snake_case = candidates[divisor_indices] _snake_case = torch.max(UpperCamelCase__ ) return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' ) class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) _snake_case = {0: 'batch', 1: 'past_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCAmelCase ( self ) -> int: return self._config.num_heads def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: _snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) # We need to order the input in the way they appears in the forward() _snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers ) ] _snake_case = common_inputs['attention_mask'] if self.use_past: _snake_case = ordered_inputs['attention_mask'].dtype _snake_case = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self ) -> int: return 13
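The module-level unfold helper above (it appears to be the `custom_unfold` utility from the original GPT-Neo ONNX config; its name is scrambled here) extracts sliding windows along one dimension. Assuming it is in scope under the name `custom_unfold`, a quick equivalence check against `torch.Tensor.unfold`:

# Equivalence check for the sliding-window helper above; `custom_unfold` is
# an assumed name for the scrambled module-level function.
import torch

x = torch.arange(10).reshape(1, 10)
windows = custom_unfold(x, 1, 4, 2)             # dim=1, window size 4, step 2
print(windows.shape)                            # torch.Size([1, 4, 4])
print(torch.equal(windows, x.unfold(1, 4, 2)))  # expected: True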
295
0
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL image by `level` (positive = brighter)."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level: a uniform shift
        # applied to every channel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
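Since 128 + level + (c - 128) reduces to c + level, the transfer function is a uniform shift; a file-free check using a tiny synthetic image (assumes the `change_brightness` function above is in scope):

# File-free check of the transfer function: level=100 maps channel 50 -> 150.
from PIL import Image

tiny = Image.new("L", (2, 2), color=50)   # 2x2 grayscale image, all pixels 50
out = change_brightness(tiny, 100)
print(out.getpixel((0, 0)))  # 150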
352
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative, in place:
    # a channel value c becomes 255 - c
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (mutates img in place)
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
295
0
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(
            task=task, language=language, no_timestamps=no_timestamps
        )

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # attach the tokenized transcript as labels for seq2seq training
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
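A hypothetical usage sketch for the processor above (the checkpoint name and the silent audio buffer are assumptions chosen for illustration):

# Hypothetical usage: feed audio and a transcript through the processor.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16000, dtype=np.float32)   # one second of silence at 16 kHz

batch = processor(audio=audio, sampling_rate=16000, text="hello world")
print(batch["input_features"].shape)  # log-mel features for the encoder
print(batch["labels"])                # token ids of the transcript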
353
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' _snake_case = VideoMAEConfig() set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ ) if "finetuned" not in model_name: _snake_case = False if "finetuned" in model_name: _snake_case = 'huggingface/label-files' if "kinetics" in model_name: _snake_case = 400 _snake_case = 'kinetics400-id2label.json' elif "ssv2" in model_name: _snake_case = 174 _snake_case = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) _snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) _snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int: '''simple docstring''' if "small" in model_name: _snake_case = 384 _snake_case = 1_536 _snake_case = 12 _snake_case = 16 _snake_case = 12 _snake_case = 3 _snake_case = 192 _snake_case = 768 elif "large" in model_name: _snake_case = 1_024 _snake_case = 4_096 _snake_case = 24 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 512 _snake_case = 2_048 elif "huge" in model_name: _snake_case = 1_280 _snake_case = 5_120 _snake_case = 32 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 640 _snake_case = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' if "encoder." in name: _snake_case = name.replace('encoder.' 
, '' ) if "cls_token" in name: _snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: _snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: _snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: _snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: _snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: _snake_case = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: _snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: _snake_case = name.replace('attn' , 'attention.self' ) if "attn" in name: _snake_case = name.replace('attn' , 'attention.attention' ) if "norm1" in name: _snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: _snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: _snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: _snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: _snake_case = name.replace('head' , 'classifier' ) return name def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): _snake_case = orig_state_dict.pop(UpperCamelCase__ ) if key.startswith('encoder.' ): _snake_case = key.replace('encoder.' , '' ) if "qkv" in key: _snake_case = key.split('.' ) if key.startswith('decoder.blocks' ): _snake_case = config.decoder_hidden_size _snake_case = int(key_split[2] ) _snake_case = 'decoder.decoder_layers.' if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = config.hidden_size _snake_case = int(key_split[1] ) _snake_case = 'videomae.encoder.layer.' 
if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = val return orig_state_dict def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _snake_case = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) _snake_case = np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' _snake_case = get_videomae_config(UpperCamelCase__ ) if "finetuned" in model_name: _snake_case = VideoMAEForVideoClassification(UpperCamelCase__ ) else: _snake_case = VideoMAEForPreTraining(UpperCamelCase__ ) # download original checkpoint, hosted on Google Drive _snake_case = 'pytorch_model.bin' gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' ) if "model" in files: _snake_case = files['model'] else: _snake_case = files['module'] _snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # verify model on basic input _snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _snake_case = prepare_video() _snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' ) if "finetuned" not in model_name: _snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) _snake_case = torch.load(UpperCamelCase__ ) _snake_case = model(**UpperCamelCase__ ) _snake_case = outputs.logits _snake_case = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one _snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == 
"videomae-large-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": _snake_case = outputs.loss assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(UpperCamelCase__ , organization='nielsr' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
295
0
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
354
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , 
config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
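# Hedged usage sketch for the ResNet image-classification model defined above; the
# checkpoint name "microsoft/resnet-50" and the image path are assumptions for
# illustration, not part of the original file.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

image = Image.open("cat.png")  # any RGB image (placeholder path)
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])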
295
0
"""simple docstring""" from __future__ import annotations def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool: '''simple docstring''' _snake_case = get_failure_array(UpperCamelCase__ ) # 2) Step through text searching for pattern _snake_case , _snake_case = 0, 0 # index into text, pattern while i < len(UpperCamelCase__ ): if pattern[j] == text[i]: if j == (len(UpperCamelCase__ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _snake_case = failure[j - 1] continue i += 1 return False def lowerCamelCase__ ( UpperCamelCase__ : str ) -> list[int]: '''simple docstring''' _snake_case = [0] _snake_case = 0 _snake_case = 1 while j < len(UpperCamelCase__ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _snake_case = failure[i - 1] continue j += 1 failure.append(UpperCamelCase__ ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase_ = """abc1abc12""" UpperCAmelCase_ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" UpperCAmelCase_ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase_ = """ABABX""" UpperCAmelCase_ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) UpperCAmelCase_ = """AAAB""" UpperCAmelCase_ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) UpperCAmelCase_ = """abcdabcy""" UpperCAmelCase_ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) UpperCAmelCase_ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
355
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited these vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate: if we find an answer return the path, otherwise return an empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
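# Hedged usage sketch for the backtracking solver above; the adjacency matrix
# is a hand-picked example, not data from the original file. For this graph a
# Hamiltonian cycle exists and the returned path starts and ends at vertex 0.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # e.g. [0, 1, 2, 4, 3, 0]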
295
0
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # even numbers above 2 are not prime, so only mark odd candidates
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
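# Quick sanity check of the helpers above with a small ceiling (illustrative):
# below 100 the longest run of consecutive primes summing to a prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41.
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(solution(100))    # 41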
356
import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]: '''simple docstring''' _snake_case = OmegaConf.load(UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model'] _snake_case = list(state_dict.keys() ) # extract state_dict for VQVAE _snake_case = {} _snake_case = 'first_stage_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] # extract state_dict for UNetLDM _snake_case = {} _snake_case = 'model.diffusion_model.' for key in keys: if key.startswith(UpperCamelCase__ ): _snake_case = state_dict[key] _snake_case = config.model.params.first_stage_config.params _snake_case = config.model.params.unet_config.params _snake_case = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _snake_case = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _snake_case = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) UpperCAmelCase_ = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
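# Hedged invocation sketch for the conversion script above; the script filename
# and all paths are placeholders, not files shipped with any particular checkpoint:
#
#   python convert_ldm_original.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline
#
# The saved directory can then be reloaded with diffusers via
# LDMPipeline.from_pretrained("./ldm-pipeline").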
295
0
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
357
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase_ : @property def lowerCAmelCase ( self ) -> int: return self.get_dummy_input() @property def lowerCAmelCase ( self ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]: _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) return dummy_input def lowerCAmelCase ( self ) -> Tuple: _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) unet_block.to(lowerCAmelCase_ ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowerCAmelCase ( self ) -> Tuple: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() _snake_case = model(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ ) _snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward()
295
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 UpperCAmelCase_ = data_utils.TransfoXLTokenizer UpperCAmelCase_ = data_utils.TransfoXLCorpus UpperCAmelCase_ = data_utils UpperCAmelCase_ = data_utils def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ) -> Any: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCamelCase__ , 'rb' ) as fp: _snake_case = pickle.load(UpperCamelCase__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _snake_case = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) _snake_case = corpus.vocab.__dict__ torch.save(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCamelCase__ ) _snake_case = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _snake_case = os.path.abspath(UpperCamelCase__ ) _snake_case = os.path.abspath(UpperCamelCase__ ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": _snake_case = TransfoXLConfig() else: _snake_case = TransfoXLConfig.from_json_file(UpperCamelCase__ ) print(F'''Building PyTorch model from configuration: {config}''' ) _snake_case = TransfoXLLMHeadModel(UpperCamelCase__ ) _snake_case = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save pytorch-model _snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) print(F'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' ) torch.save(model.state_dict() , UpperCamelCase__ ) print(F'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) UpperCAmelCase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
295
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Solve the N-Queens problem with depth-first search and backtracking."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row
    # in the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First check that no queen already placed in possible_board uses this column
        # (a repeated value would be a vertical collision). Then apply the two
        # diagonal formulas:
        #
        #   45º:  y - x = b   or   row - col = b
        #   135º: y + x = b   or   row + col = b
        #
        # and verify that neither result already exists in diagonal_right_collisions
        # or diagonal_left_collisions respectively. If any of these checks is True
        # there is a collision, so we continue with the next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks pass we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Print every solution of the N-Queens problem for an ``n`` x ``n`` board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
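# Small illustration of the two diagonal formulas used above (values chosen by
# hand): two queens at (row, col) share a 45º diagonal iff row - col matches,
# and a 135º diagonal iff row + col matches.
queen_a, queen_b = (1, 3), (3, 5)
print(queen_a[0] - queen_a[1] == queen_b[0] - queen_b[1])  # True  -> same 45º diagonal
print(queen_a[0] + queen_a[1] == queen_b[0] + queen_b[1])  # False -> different 135º diagonals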
359
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum of any contiguous subarray of ``arr``."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
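# Illustrative extra cases for max_subarray_sum (not in the original file):
print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))          # 6  -> subarray [4, -1, 2, 1]
print(max_subarray_sum([-5, -2, -9]))                              # -2 (best single element)
print(max_subarray_sum([-5, -2, -9], allow_empty_subarrays=True))  # 0  (empty subarray allowed)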
295
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
360
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCamelCase_ ( enum.Enum ): lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2 @add_end_docstrings(_lowerCamelCase ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _snake_case = None if self.model.config.prefix is not None: _snake_case = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _snake_case = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params ) _snake_case = {**self._preprocess_params, **preprocess_params} _snake_case = {**self._forward_params, **forward_params} def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple: _snake_case = {} if prefix is not None: _snake_case = prefix if prefix: _snake_case = self.tokenizer( lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) _snake_case = handle_long_generation preprocess_params.update(lowerCAmelCase_ ) _snake_case = generate_kwargs _snake_case = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.TENSORS if return_type is not None: _snake_case = return_type if clean_up_tokenization_spaces is not None: _snake_case = clean_up_tokenization_spaces if stop_sequence is not None: _snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) _snake_case = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any: _snake_case = self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prompt_text if handle_long_generation == "hole": _snake_case = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _snake_case = generate_kwargs['max_new_tokens'] else: _snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _snake_case = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) _snake_case = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _snake_case = inputs['attention_mask'][:, -keep_length:] return inputs def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: _snake_case = model_inputs['input_ids'] _snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ ) # Allow empty prompts if input_ids.shape[1] == 0: _snake_case = None _snake_case = None _snake_case = 1 else: _snake_case = input_ids.shape[0] _snake_case = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_snake_case = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: _snake_case = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _snake_case = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = generated_sequence.shape[0] if self.framework == "pt": _snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int: _snake_case = model_outputs['generated_sequence'][0] _snake_case = model_outputs['input_ids'] _snake_case = model_outputs['prompt_text'] _snake_case = generated_sequence.numpy().tolist() _snake_case = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _snake_case = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _snake_case = self.tokenizer.decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _snake_case = 0 else: _snake_case = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) ) if return_type == ReturnType.FULL_TEXT: _snake_case = prompt_text + text[prompt_length:] else: _snake_case = text[prompt_length:] _snake_case = {'generated_text': all_text} records.append(lowerCAmelCase_ ) return records
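# Hedged usage sketch for the text-generation pipeline defined above; the
# "gpt2" checkpoint and prompt are assumptions chosen for illustration.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of",     # prompt
    max_new_tokens=20,
    return_full_text=False,        # exercises the ReturnType.NEW_TEXT branch above
)
print(outputs[0]["generated_text"])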
295
0
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) with Heun's (improved Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor (Euler) step, then a trapezoidal corrector step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
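# Quick illustrative check on dy/dx = y with y(0) = 1; the exact solution is
# e^x, so y(1) should be close to 2.71828 (the step size is chosen arbitrarily):
approx = heun_method(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
print(approx[-1])  # ~2.71828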
361
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
295
0
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ ) -> None: _snake_case = order # a_{0} ... a_{k} _snake_case = [1.0] + [0.0] * order # b_{0} ... b_{k} _snake_case = [1.0] + [0.0] * order # x[n-1] ... x[n-k] _snake_case = [0.0] * self.order # y[n-1] ... y[n-k] _snake_case = [0.0] * self.order def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None: if len(lowerCAmelCase_ ) < self.order: _snake_case = [1.0, *a_coeffs] if len(lowerCAmelCase_ ) != self.order + 1: _snake_case = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(lowerCAmelCase_ )}''' ) raise ValueError(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) != self.order + 1: _snake_case = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(lowerCAmelCase_ )}''' ) raise ValueError(lowerCAmelCase_ ) _snake_case = a_coeffs _snake_case = b_coeffs def lowerCAmelCase ( self , lowerCAmelCase_ ) -> float: _snake_case = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _snake_case = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _snake_case = self.input_history[:-1] _snake_case = self.output_history[:-1] _snake_case = sample _snake_case = result return result
362
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
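# Hedged usage sketch for the byte-level BPE tokenizer above; the checkpoint
# name comes from the pretrained map in this file, and the exact ids shown are
# what I would expect rather than verified output.
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])  # e.g. [0, 31414, 232, 2] -> <s> Hello world </s>
print(tokenizer.decode(encoded["input_ids"]))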
295
0
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers UpperCAmelCase_ = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> Dict: '''simple docstring''' require_version(deps[pkg] , UpperCamelCase__ )
363
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase_ = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: _snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) else: _snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) _snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) _snake_case = ['key_proj', 'value_proj', 'query_proj'] _snake_case = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: _snake_case = key.split('.' ) if attributes[0] == "lm_head": _snake_case = prophet _snake_case = prophet_old else: _snake_case = prophet.prophetnet _snake_case = prophet_old.model _snake_case = False for attribute in attributes: if attribute in mapping: _snake_case = mapping[attribute] if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0: _snake_case = attribute elif hasattr(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _snake_case = old_model.weight logger.info(F'''{attribute} is initialized.''' ) _snake_case = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_snake_case = old_model.bias logger.info(F'''{attribute} is initialized''' ) _snake_case = True break elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ): _snake_case = old_model.in_proj_weight.shape[0] // 3 _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _snake_case = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." _snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] ) _snake_case = True break if attribute.isdigit(): _snake_case = model[int(UpperCamelCase__ )] _snake_case = old_model[int(UpperCamelCase__ )] else: _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if old_attribute == "": _snake_case = old_model else: if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
295
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase_ = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase_ = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } @lru_cache() def lowerCamelCase__ ( ) -> Tuple: '''simple docstring''' _snake_case = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _snake_case = bs[:] _snake_case = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 _snake_case = [chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = set() _snake_case = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case = char return pairs class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple: _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _snake_case = json.load(lowerCAmelCase_ ) _snake_case = {v: k for k, v in self.encoder.items()} _snake_case = errors # how to handle errors in decoding _snake_case = bytes_to_unicode() _snake_case = {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle: _snake_case = merges_handle.read().split('\n' )[1:-1] _snake_case = [tuple(merge.split() ) for merge in bpe_merges] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = {} _snake_case = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def lowerCAmelCase ( self ) -> Any: return len(self.encoder ) def lowerCAmelCase ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if token in self.cache: return self.cache[token] _snake_case = tuple(lowerCAmelCase_ ) _snake_case = get_pairs(lowerCAmelCase_ ) if not pairs: return token while True: _snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case = bigram _snake_case = [] _snake_case = 0 while i < len(lowerCAmelCase_ ): try: _snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case = j if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case = tuple(lowerCAmelCase_ ) _snake_case = new_word if len(lowerCAmelCase_ ) == 1: break else: _snake_case = get_pairs(lowerCAmelCase_ ) _snake_case = ' '.join(lowerCAmelCase_ ) _snake_case = word return word def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]: _snake_case = [] for token in re.findall(self.pat , lowerCAmelCase_ ): _snake_case = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: return self.decoder.get(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: _snake_case = ''.join(lowerCAmelCase_ ) _snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should 
be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' ) _snake_case = 0 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) _snake_case = token_index writer.write(' '.join(lowerCAmelCase_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case = [self.cls_token_id] _snake_case = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str: _snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()): _snake_case = ' ' + text return (text, kwargs)
364
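The byte-level BPE tokenizer above always merges the adjacent pair with the lowest merge rank first. A self-contained sketch of that ranking step, using a toy merge table (the real table is loaded from merges.txt):

def get_pairs(word):
    # set of adjacent symbol pairs in a word, as in the helper above
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table: lower rank = higher priority
word = ("l", "o", "w")
bigram = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
print(bigram)  # ('l', 'o') is merged first; pairs missing from the table rank as infinity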
import random def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict: '''simple docstring''' _snake_case = {i: [] for i in range(UpperCamelCase__ )} # if probability is greater than or equal to 1, generate a complete graph if probability >= 1: return complete_graph(UpperCamelCase__ ) # if probability is less than or equal to 0, return a graph without edges if probability <= 0: return graph # for each pair of nodes, add an edge from i to j # if the randomly generated number is lower than probability for i in range(UpperCamelCase__ ): for j in range(i + 1 , UpperCamelCase__ ): if random.random() < probability: graph[i].append(UpperCamelCase__ ) if not directed: # if the graph is undirected, add an edge from j to i as well graph[j].append(UpperCamelCase__ ) return graph def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict: '''simple docstring''' return { i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ ) } if __name__ == "__main__": import doctest doctest.testmod()
295
0
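A deobfuscated, runnable sketch of the Erdős–Rényi-style generator above (the function and argument names are reconstructions, since the dump renames them):

import random

def random_graph(num_nodes, probability, directed=False):
    # adjacency-list graph; each candidate edge (i, j) is kept when
    # random.random() falls below `probability`
    graph = {i: [] for i in range(num_nodes)}
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)  # undirected: mirror the edge
    return graph

random.seed(0)
print(random_graph(4, 0.5))  # exact edge set depends on the seed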
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib UpperCAmelCase_ = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } UpperCAmelCase_ = logging.WARNING def lowerCamelCase__ ( ) -> Optional[int]: '''simple docstring''' _snake_case = os.getenv('DATASETS_VERBOSITY' , UpperCamelCase__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' F'''has to be one of: { ", ".join(log_levels.keys() ) }''' ) return _default_log_level def lowerCamelCase__ ( ) -> str: '''simple docstring''' return __name__.split('.' )[0] def lowerCamelCase__ ( ) -> logging.Logger: '''simple docstring''' return logging.getLogger(_get_library_name() ) def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowerCamelCase__ ( UpperCamelCase__ : Optional[str] = None ) -> logging.Logger: '''simple docstring''' if name is None: _snake_case = _get_library_name() return logging.getLogger(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> int: '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def lowerCamelCase__ ( UpperCamelCase__ : int ) -> None: '''simple docstring''' _get_library_root_logger().setLevel(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> int: '''simple docstring''' return set_verbosity(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' return set_verbosity(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' return set_verbosity(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> int: '''simple docstring''' return set_verbosity(UpperCamelCase__ ) def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = False def lowerCamelCase__ ( ) -> None: '''simple docstring''' _snake_case = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class UpperCamelCase_ : def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: # pylint: disable=unused-argument _snake_case = args[0] if args else None def __iter__( self ) -> str: return iter(self._iterator ) def __getattr__( self , lowerCAmelCase_ ) -> List[str]: def empty_fn(*lowerCAmelCase_ , **lowerCAmelCase_ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> str: return self def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: return UpperCAmelCase_ = True class UpperCamelCase_ : def __call__( self , *lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> Union[str, Any]: if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) else: return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: _snake_case = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> str: if _tqdm_active: return 
tqdm_lib.tqdm.get_lock() UpperCAmelCase_ = _tqdm_cls() def lowerCamelCase__ ( ) -> bool: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' global _tqdm_active _snake_case = True def lowerCamelCase__ ( ) -> int: '''simple docstring''' global _tqdm_active _snake_case = False
365
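Assuming this module ships as datasets.utils.logging (as in the datasets library), a typical use of the verbosity helpers above would be:

from datasets.utils import logging as ds_logging

ds_logging.set_verbosity(ds_logging.DEBUG)  # equivalent to exporting DATASETS_VERBOSITY=debug
logger = ds_logging.get_logger(__name__)
logger.debug("visible now that the library root logger allows DEBUG")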
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = encoder_stride _snake_case = num_attention_outputs _snake_case = embed_dim _snake_case = embed_dim + 1 _snake_case = resolution _snake_case = depths _snake_case = hidden_sizes _snake_case = dim _snake_case = mlp_expansion_ratio def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> Tuple: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _snake_case = self.type_sequence_label_size _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Optional[Any]: pass def lowerCAmelCase ( self ) -> str: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): _snake_case = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: _snake_case = seq_length * self.model_tester.chunk_length else: _snake_case = self.model_tester.seq_length 
self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _snake_case = outputs.decoder_hidden_states self.assertIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also works using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): _snake_case = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also works using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else
outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self ) -> Dict: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _snake_case = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _snake_case = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _snake_case = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Dict: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
295
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") UpperCAmelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase_ : lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self ) -> Any: _snake_case = {} if self.train_dir is not None: _snake_case = self.train_dir if self.validation_dir is not None: _snake_case = self.validation_dir _snake_case = data_files if data_files else None @dataclass class UpperCamelCase_ : lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowerCamelCase )} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class UpperCamelCase_ : def __init__( self , lowerCAmelCase_=192 , lowerCAmelCase_=32 , lowerCAmelCase_=4 , lowerCAmelCase_=0.6 ) -> Tuple: _snake_case = input_size _snake_case = mask_patch_size _snake_case = model_patch_size _snake_case = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) _snake_case = self.input_size // self.mask_patch_size _snake_case = self.mask_patch_size // self.model_patch_size _snake_case = self.rand_size**2 _snake_case = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self ) -> Any: _snake_case = np.random.permutation(self.token_count )[: self.mask_count] _snake_case = np.zeros(self.token_count , dtype=lowerCAmelCase_ ) _snake_case = 1 _snake_case = mask.reshape((self.rand_size, self.rand_size) ) _snake_case = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' _snake_case = torch.stack([example['pixel_values'] for example in examples] ) _snake_case = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
_snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _snake_case = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _snake_case = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , UpperCamelCase__ ) and data_args.train_val_split > 0.0: _snake_case = ds['train'].train_test_split(data_args.train_val_split ) _snake_case = split['train'] _snake_case = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _snake_case = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _snake_case = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCamelCase__ ) elif model_args.model_name_or_path: _snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ ) else: _snake_case = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(UpperCamelCase__ , 'decoder_type' ): _snake_case = 'simmim' # adapt config _snake_case = model_args.image_size if model_args.image_size is not None else config.image_size _snake_case = model_args.patch_size if model_args.patch_size is not None else config.patch_size _snake_case = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _snake_case = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase__ ) elif model_args.model_name_or_path: _snake_case = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ ) else: _snake_case = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _snake_case = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _snake_case = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _snake_case = AutoModelForMaskedImageModeling.from_config(UpperCamelCase__ ) if training_args.do_train: _snake_case = ds['train'].column_names else: _snake_case = ds['validation'].column_names if data_args.image_column_name is not None: _snake_case = data_args.image_column_name elif "image" in column_names: _snake_case = 'image' elif "img" in column_names: _snake_case = 'img' else: _snake_case = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _snake_case = Compose( [ Lambda(lambda UpperCamelCase__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _snake_case = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(UpperCamelCase__ : Dict ): _snake_case = [transforms(UpperCamelCase__ ) for image in examples[image_column_name]] _snake_case = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _snake_case = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(UpperCamelCase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _snake_case = ( 
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(UpperCamelCase__ ) # Initialize our trainer _snake_case = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , ) # Training if training_args.do_train: _snake_case = None if training_args.resume_from_checkpoint is not None: _snake_case = training_args.resume_from_checkpoint elif last_checkpoint is not None: _snake_case = last_checkpoint _snake_case = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _snake_case = trainer.evaluate() trainer.log_metrics('eval' , UpperCamelCase__ ) trainer.save_metrics('eval' , UpperCamelCase__ ) # Write model card and (optionally) push to hub _snake_case = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) if __name__ == "__main__": main()
366
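A self-contained sketch of the SimMIM-style masking implemented by MaskGenerator above, using the script's default sizes: permute the coarse patch indices, mark the first mask_count as masked, then upsample to the model's patch grid.

import numpy as np
import torch

input_size, mask_patch_size, model_patch_size, mask_ratio = 192, 32, 4, 0.6
rand_size = input_size // mask_patch_size        # 6x6 grid of maskable patches
scale = mask_patch_size // model_patch_size      # each maskable patch spans 8x8 model patches
token_count = rand_size**2
mask_count = int(np.ceil(token_count * mask_ratio))  # 22 of 36 patches masked

idx = np.random.permutation(token_count)[:mask_count]
mask = np.zeros(token_count, dtype=int)
mask[idx] = 1
mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
bool_masked_pos = torch.tensor(mask.flatten())   # shape (48 * 48,) = (2304,)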
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = LEDTokenizer lowerCAmelCase_ = LEDTokenizerFast lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> List[str]: super().setUp() _snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _snake_case = {'unk_token': '<unk>'} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase_ ) ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self ) -> Optional[Any]: return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def lowerCAmelCase ( self ) -> Union[str, Any]: return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase_ ) self.assertIn('attention_mask' , lowerCAmelCase_ ) self.assertNotIn('labels' , lowerCAmelCase_ ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ ) @require_torch def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , 
return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer( ['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['A long paragraph for summarization.'] _snake_case = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' ) _snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' ) _snake_case = inputs['input_ids'] _snake_case = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCAmelCase ( self ) -> List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case = ['Summary of the text.', 'Another summary.'] _snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']] _snake_case = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Tuple: pass def lowerCAmelCase ( self ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = 'A, <mask> AllenNLP sentence.' _snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
295
0
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase_ = logging.get_logger(__name__) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None: warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , lowerCAmelCase_ , ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
367
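The FlavaFeatureExtractor shim above illustrates a common deprecation pattern: keep the old class name as a thin subclass that warns and delegates. A generic sketch of the same pattern, with hypothetical class names:

import warnings

class NewImageProcessor:  # stand-in for the replacement class
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):  # the deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like NewImageProcessor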
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = BertTokenizer lowerCAmelCase_ = BertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english def lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _snake_case = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = 'UNwant\u00E9d,running' _snake_case = 'unwanted, running' return input_text, output_text def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.tokenizer_class(self.vocab_file ) _snake_case = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def lowerCAmelCase ( self ) -> Tuple: if not self.test_rust_tokenizer: return _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing _snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) _snake_case = 'UNwant\u00E9d,running' _snake_case = tokenizer.tokenize(lowerCAmelCase_ ) _snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.get_rust_tokenizer() _snake_case = tokenizer.encode(lowerCAmelCase_ ) _snake_case = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase ( self ) -> Any: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase ( self ) -> Dict: _snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = BasicTokenizer() _snake_case = 'a\n\'ll !!to?\'d of, can\'t.' 
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _snake_case = {} for i, token in enumerate(lowerCAmelCase_ ): _snake_case = i _snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase ( self ) -> Tuple: self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase ( self ) -> Dict: self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase ( self ) -> int: self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase ( self ) -> Tuple: _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' ) _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def lowerCAmelCase ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _snake_case = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) _snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False _snake_case = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), 
tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase ( self ) -> str: _snake_case = ['的', '人', '有'] _snake_case = ''.join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case = True _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) _snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) _snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". _snake_case = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
style_context_codestyle: 295
label: 0
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever UpperCAmelCase_ = logging.getLogger(__name__) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Optional[int]: super().__init__( lowerCAmelCase_ , question_encoder_tokenizer=lowerCAmelCase_ , generator_tokenizer=lowerCAmelCase_ , index=lowerCAmelCase_ , init_retrieval=lowerCAmelCase_ , ) _snake_case = None def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: logger.info('initializing retrieval' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('dist initialized' ) # needs to be set manually _snake_case = self._infer_socket_ifname() # avoid clash with the NCCL port _snake_case = str(distributed_port + 1 ) _snake_case = dist.new_group(ranks=lowerCAmelCase_ , backend='gloo' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('dist not initialized / main' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowerCAmelCase ( self ) -> Tuple: return dist.get_rank(group=self.process_group ) == 0 def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=torch.floataa ) -> List[str]: _snake_case = torch.empty(lowerCAmelCase_ , dtype=lowerCAmelCase_ ) dist.scatter(lowerCAmelCase_ , src=0 , scatter_list=lowerCAmelCase_ , group=self.process_group ) return target_tensor def lowerCAmelCase ( self ) -> Dict: _snake_case = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _snake_case = next((addr for addr in addrs if addr.startswith('e' )) , lowerCAmelCase_ ) return ifname def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple[np.ndarray, List[dict]]: # single GPU training if not dist.is_initialized(): _snake_case , _snake_case = self._main_retrieve(lowerCAmelCase_ , lowerCAmelCase_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase_ ) # distributed training _snake_case = dist.get_world_size(group=self.process_group ) # gather logic _snake_case = None if self._is_main(): _snake_case = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCAmelCase_ )] dist.gather(torch.tensor(lowerCAmelCase_ ) , dst=0 , gather_list=lowerCAmelCase_ , group=self.process_group ) # scatter logic _snake_case = question_hidden_states.shape[0] _snake_case = [] _snake_case = [] if self._is_main(): assert len(lowerCAmelCase_ ) == world_size _snake_case , _snake_case = self._main_retrieve(torch.cat(lowerCAmelCase_ ).numpy() , lowerCAmelCase_ ) _snake_case , _snake_case = torch.tensor(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ ) _snake_case = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self._scattered(lowerCAmelCase_ , [n_queries, n_docs] , target_type=torch.intaa ) _snake_case = self._scattered(lowerCAmelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase_ )
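The retrieve path above gathers every rank's question embeddings on the main worker, runs the index search there, then scatters each rank its own chunk of results over a gloo group. Below is a minimal single-process sketch of that gather/scatter round trip, not the retriever itself: world_size=1 and the address/port are placeholders, and it assumes a torch build with the gloo backend.

import os

import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

query = torch.randn(2, 8)             # this rank's question embeddings
gathered = [torch.empty_like(query)]  # the dst rank holds one slot per rank
dist.gather(query, gather_list=gathered, dst=0)

target = torch.empty(2, 8)
dist.scatter(target, scatter_list=[gathered[0]], src=0)  # each rank gets its chunk back
assert torch.equal(target, query)
dist.destroy_process_group()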
code_codestyle: 368
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 295
label: 0
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum number of perfect squares that sum to ``number``."""
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
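A quick usage check of the DP above (the values follow from Lagrange's four-square theorem, which caps the answer at 4):

assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
assert minimum_squares_to_represent_a_number(13) == 2  # 4 + 9
assert minimum_squares_to_represent_a_number(16) == 1  # 16 itself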
code_codestyle: 369
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path UpperCAmelCase_ = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = None lowerCAmelCase_ = None def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: with TemporaryDirectory() as tmp_dir: _snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) _snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ ) _snake_case = builder_cls( cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , ) _snake_case = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) _snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ) self.assertTrue(os.path.exists(lowerCAmelCase_ ) ) @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' _snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' _snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _snake_case = None builder_instance.download_and_prepare() _snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _snake_case = dataset_module_factory('wikipedia' , 
cache_dir=UpperCamelCase__ ) _snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ ) _snake_case = builder_cls( cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) _snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert "train" in ds assert isinstance(ds['train'] , UpperCamelCase__ ) assert next(iter(ds['train'] ) )
style_context_codestyle: 295
label: 0
def prefix_function(input_string: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix (failure) function of ``input_string``."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the length of the longest proper prefix that is also a suffix."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
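A short check of the values this produces: pi[i] is the length of the longest proper prefix of s[: i + 1] that is also a suffix of it.

assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3  # the border "aab"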
code_codestyle: 370
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
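Each function counts combinations that use coins up to its own denomination, so solution(n) is the full coin-partition count of Project Euler 31. Two reference values:

assert five_pence(5) == 4      # 5; 2+2+1; 2+1+1+1; 1+1+1+1+1
assert solution(200) == 73682  # the Project Euler 31 answer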
style_context_codestyle: 295
label: 0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=4 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_attention_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_choices def lowerCAmelCase ( self ) -> Tuple: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_attention_mask: _snake_case = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = None if self.use_token_type_ids: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self ) -> str: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = True lowerCAmelCase_ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self ) -> Any: _snake_case = FlaxRoFormerModelTester(self ) @slow def lowerCAmelCase ( self ) -> List[Any]: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=lowerCAmelCase_ ) _snake_case = 
model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase_ ) @require_flax class UpperCamelCase_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self ) -> str: _snake_case = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) _snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] ) _snake_case = model(lowerCAmelCase_ )[0] _snake_case = 5_0000 _snake_case = (1, 6, vocab_size) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
code_codestyle: 371
def binomial_coefficient(n: int, r: int) -> int:
    c = [0] * (r + 1)
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
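This is the space-optimized Pascal's-triangle update: sweeping j from high to low folds row i into the single array c without clobbering the previous row's entries. For reference:

from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252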
style_context_codestyle: 295
label: 0
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def lowerCamelCase__ ( UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None ) -> Optional[Any]: '''simple docstring''' return field(default_factory=lambda: default , metadata=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCAmelCase_ = field( metadata={'''help''': '''The csv file to plot.'''} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) lowerCAmelCase_ = field( default=_lowerCamelCase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) lowerCAmelCase_ = list_field( default=_lowerCamelCase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def lowerCamelCase__ ( UpperCamelCase__ : str ) -> str: '''simple docstring''' try: int(UpperCamelCase__ ) return True except ValueError: return False def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> List[str]: '''simple docstring''' try: float(UpperCamelCase__ ) return True except ValueError: return False class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ ) -> Tuple: _snake_case = args _snake_case = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: _snake_case = csv.DictReader(lowerCAmelCase_ ) for row in reader: _snake_case = row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None _snake_case = int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None _snake_case = float(row['result'] ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = plt.subplots() _snake_case = 'Time usage' if self.args.is_time else 'Memory usage' _snake_case = title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _snake_case = sorted(set(self.result_dict[model_name]['bsz'] ) ) _snake_case = sorted(set(self.result_dict[model_name]['seq_len'] ) ) _snake_case = self.result_dict[model_name]['result'] ((_snake_case) , (_snake_case)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _snake_case = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _snake_case = np.asarray( 
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase_ , ) else: _snake_case = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_snake_case) , (_snake_case)) = ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) _snake_case = np.asarray(lowerCAmelCase_ , lowerCAmelCase_ )[: len(lowerCAmelCase_ )] plt.scatter( lowerCAmelCase_ , lowerCAmelCase_ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(lowerCAmelCase_ , lowerCAmelCase_ , '--' ) title_str += F''' {label_model_name} vs.''' _snake_case = title_str[:-4] _snake_case = 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase_ ) plt.xlabel(lowerCAmelCase_ ) plt.ylabel(lowerCAmelCase_ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def lowerCamelCase__ ( ) -> Optional[Any]: '''simple docstring''' _snake_case = HfArgumentParser(UpperCamelCase__ ) _snake_case = parser.parse_args_into_dataclasses()[0] _snake_case = Plot(args=UpperCamelCase__ ) plot.plot() if __name__ == "__main__": main()
code_codestyle: 350
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]: super().__init__() _snake_case = nn.Sequential( nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , ) _snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = False _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.ModuleList() for lyr_num in range(lowerCAmelCase_ ): # FiLM conditional T5 decoder _snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) self.decoders.append(lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = nn.Dropout(p=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case , _snake_case , _snake_case = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _snake_case = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _snake_case = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _snake_case = torch.broadcast_to( torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _snake_case = self.position_encoding(lowerCAmelCase_ ) _snake_case = self.continuous_inputs_projection(lowerCAmelCase_ ) inputs += position_encodings _snake_case = self.dropout(lowerCAmelCase_ ) # decoder: No padding present. _snake_case = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings _snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _snake_case = lyr( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0] _snake_case = self.decoder_norm(lowerCAmelCase_ ) _snake_case = self.post_dropout(lowerCAmelCase_ ) _snake_case = self.spec_out(lowerCAmelCase_ ) return spec_out class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple: super().__init__() _snake_case = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple: _snake_case = self.layer[0]( lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) if encoder_hidden_states is not None: _snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _snake_case = self.layer[1]( lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , ) # Apply Film Conditional Feed Forward layer _snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ ) return (hidden_states,) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: super().__init__() _snake_case = TaLayerNorm(lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str: # pre_self_attention_layer_norm _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ ) # Self-attention block _snake_case = self.attention(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: super().__init__() _snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , 
lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict: _snake_case = self.layer_norm(lowerCAmelCase_ ) _snake_case = self.attention( lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return layer_output class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: super().__init__() _snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) _snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ ) _snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]: _snake_case = self.layer_norm(lowerCAmelCase_ ) if conditioning_emb is not None: _snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.DenseReluDense(lowerCAmelCase_ ) _snake_case = hidden_states + self.dropout(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.Dropout(lowerCAmelCase_ ) _snake_case = NewGELUActivation() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any: _snake_case = self.act(self.wi_a(lowerCAmelCase_ ) ) _snake_case = self.wi_a(lowerCAmelCase_ ) _snake_case = hidden_gelu * hidden_linear _snake_case = self.dropout(lowerCAmelCase_ ) _snake_case = self.wo(lowerCAmelCase_ ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str: super().__init__() _snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) ) _snake_case = eps def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ ) _snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _snake_case = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class UpperCamelCase_ ( nn.Module ): def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) )) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: super().__init__() _snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = self.scale_bias(lowerCAmelCase_ ) _snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 ) _snake_case = x * (1 + scale) + shift return x
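The TaFiLMLayer at the end of this module is feature-wise linear modulation: a conditioning vector is projected to per-feature (scale, shift) and applied as x * (1 + scale) + shift. A minimal standalone sketch of the same pattern; the class name and dimensions here are illustrative, not the model's.

import torch
from torch import nn


class MiniFiLM(nn.Module):
    """Project a conditioning vector to per-feature scale/shift and modulate x."""

    def __init__(self, cond_dim: int, features: int) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(cond_dim, features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift


film = MiniFiLM(cond_dim=512, features=768)
x = torch.randn(4, 10, 768)    # (batch, seq, features)
cond = torch.randn(4, 1, 512)  # broadcast over the sequence axis
assert film(x, cond).shape == x.shape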
style_context_codestyle: 295
label: 0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = AltDiffusionPipeline lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _snake_case = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , ) torch.manual_seed(0 ) _snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) _snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) _snake_case = CLIPTextModel(lowerCAmelCase_ ) _snake_case = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) _snake_case = 77 _snake_case = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> List[str]: if str(lowerCAmelCase_ ).startswith('mps' ): _snake_case = torch.manual_seed(lowerCAmelCase_ ) else: _snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) _snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase ( self ) -> List[str]: super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def lowerCAmelCase ( self ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = 
'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() torch.manual_seed(0 ) _snake_case = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _snake_case = RobertaSeriesModelWithTransformation(lowerCAmelCase_ ) _snake_case = text_encoder _snake_case = AltDiffusionPipeline(**lowerCAmelCase_ ) _snake_case = alt_pipe.to(lowerCAmelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = 'A photo of an astronaut' _snake_case = alt_pipe(**lowerCAmelCase_ ) _snake_case = output.images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case = np.array( [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self ) -> str: _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ ) torch.manual_seed(0 ) _snake_case = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _snake_case = RobertaSeriesModelWithTransformation(lowerCAmelCase_ ) _snake_case = text_encoder _snake_case = AltDiffusionPipeline(**lowerCAmelCase_ ) _snake_case = alt_pipe.to(lowerCAmelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = self.get_dummy_inputs(lowerCAmelCase_ ) _snake_case = alt_pipe(**lowerCAmelCase_ ) _snake_case = output.images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _snake_case = np.array( [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): def lowerCAmelCase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self ) -> Tuple: # make sure here that pndm scheduler skips prk _snake_case = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCAmelCase_ ) _snake_case = alt_pipe.to(lowerCAmelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = 'A painting of a squirrel eating a burger' _snake_case = torch.manual_seed(0 ) _snake_case = alt_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' ) _snake_case = output.images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self ) -> List[Any]: _snake_case = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' ) _snake_case = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ ) _snake_case = 
alt_pipe.to(lowerCAmelCase_ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) _snake_case = 'A painting of a squirrel eating a burger' _snake_case = torch.manual_seed(0 ) _snake_case = alt_pipe([prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='numpy' ) _snake_case = output.images _snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
code_codestyle: 351
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''gpt_neo''' lowerCAmelCase_ = ['''past_key_values'''] lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple: _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_layers _snake_case = num_heads _snake_case = intermediate_size _snake_case = window_size _snake_case = activation_function _snake_case = resid_dropout _snake_case = embed_dropout _snake_case = attention_dropout _snake_case = classifier_dropout _snake_case = layer_norm_epsilon _snake_case = initializer_range _snake_case = use_cache _snake_case = bos_token_id _snake_case = eos_token_id _snake_case = attention_types _snake_case = self.expand_attention_types_params(lowerCAmelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any: '''simple docstring''' import torch _snake_case = input.size() _snake_case = len(UpperCamelCase__ ) _snake_case = shape[dimension] _snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ ) _snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1 _snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None] _snake_case = [slice(UpperCamelCase__ )] * rank _snake_case = indices _snake_case = input[s] _snake_case = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str: '''simple docstring''' import torch _snake_case = torch.arange(1 , UpperCamelCase__ ) _snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = remainders == 0 _snake_case = candidates[divisor_indices] _snake_case = torch.max(UpperCamelCase__ ) return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' ) class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) _snake_case = {0: 'batch', 1: 'past_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowerCAmelCase ( self ) -> int: return self._config.num_heads def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]: _snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) # We need to order the input in the way they appears in the forward() _snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers ) ] _snake_case = common_inputs['attention_mask'] if self.use_past: _snake_case = ordered_inputs['attention_mask'].dtype _snake_case = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self ) -> int: return 13
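The static expansion helper above flattens the config shorthand into one attention type per layer. A quick illustration of what it produces for the default pattern, using a standalone hypothetical re-implementation of the same loop:

def expand_attention_types(attention_types):
    # repeat each pattern `repeats` times, yielding one entry per layer
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions


layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24  # matches num_layers=24 in the default config
assert layers[:4] == ["global", "local", "global", "local"]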
style_context_codestyle: 295
label: 0
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
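This is Project Euler 65: the loop builds the numerators of the convergents of e's continued fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] and sums the digits of the max_n-th one. Reference values from the problem statement:

assert solution(10) == 17    # the 10th convergent is 1457/536, and 1+4+5+7 == 17
assert solution(100) == 272  # the Project Euler 65 answer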
code_codestyle: 352
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
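The per-pixel loop relies on NumPy broadcasting: [255, 255, 255] - img[i][j] inverts each channel. The same inversion can be vectorized over the whole array; a tiny sketch, assuming an 8-bit image:

import numpy as np

img = np.zeros((2, 2, 3), dtype=np.uint8)  # a tiny all-black test image
negative = 255 - img                       # vectorized equivalent of the loop
assert (negative == 255).all()             # black inverts to white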
style_context_codestyle: 295
label: 0
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase_ : @property def lowerCAmelCase ( self ) -> int: return self.get_dummy_input() @property def lowerCAmelCase ( self ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]: _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) return dummy_input def lowerCAmelCase ( self ) -> Tuple: _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) unet_block.to(lowerCAmelCase_ ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowerCAmelCase ( self ) -> Tuple: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() _snake_case = model(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ ) _snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward()
code_codestyle: 353
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' _snake_case = VideoMAEConfig() set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ ) if "finetuned" not in model_name: _snake_case = False if "finetuned" in model_name: _snake_case = 'huggingface/label-files' if "kinetics" in model_name: _snake_case = 400 _snake_case = 'kinetics400-id2label.json' elif "ssv2" in model_name: _snake_case = 174 _snake_case = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) _snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) _snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} return config def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int: '''simple docstring''' if "small" in model_name: _snake_case = 384 _snake_case = 1_536 _snake_case = 12 _snake_case = 16 _snake_case = 12 _snake_case = 3 _snake_case = 192 _snake_case = 768 elif "large" in model_name: _snake_case = 1_024 _snake_case = 4_096 _snake_case = 24 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 512 _snake_case = 2_048 elif "huge" in model_name: _snake_case = 1_280 _snake_case = 5_120 _snake_case = 32 _snake_case = 16 _snake_case = 12 _snake_case = 8 _snake_case = 640 _snake_case = 2_560 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple: '''simple docstring''' if "encoder." in name: _snake_case = name.replace('encoder.' 
, '' ) if "cls_token" in name: _snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: _snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: _snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: _snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: _snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: _snake_case = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: _snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: _snake_case = name.replace('attn' , 'attention.self' ) if "attn" in name: _snake_case = name.replace('attn' , 'attention.attention' ) if "norm1" in name: _snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: _snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: _snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: _snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: _snake_case = name.replace('head' , 'classifier' ) return name def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): _snake_case = orig_state_dict.pop(UpperCamelCase__ ) if key.startswith('encoder.' ): _snake_case = key.replace('encoder.' , '' ) if "qkv" in key: _snake_case = key.split('.' ) if key.startswith('decoder.blocks' ): _snake_case = config.decoder_hidden_size _snake_case = int(key_split[2] ) _snake_case = 'decoder.decoder_layers.' if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = config.hidden_size _snake_case = int(key_split[1] ) _snake_case = 'videomae.encoder.layer.' 
if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = val return orig_state_dict def lowerCamelCase__ ( ) -> Union[str, Any]: '''simple docstring''' _snake_case = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) _snake_case = np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]: '''simple docstring''' _snake_case = get_videomae_config(UpperCamelCase__ ) if "finetuned" in model_name: _snake_case = VideoMAEForVideoClassification(UpperCamelCase__ ) else: _snake_case = VideoMAEForPreTraining(UpperCamelCase__ ) # download original checkpoint, hosted on Google Drive _snake_case = 'pytorch_model.bin' gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' ) if "model" in files: _snake_case = files['model'] else: _snake_case = files['module'] _snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() # verify model on basic input _snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _snake_case = prepare_video() _snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' ) if "finetuned" not in model_name: _snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) _snake_case = torch.load(UpperCamelCase__ ) _snake_case = model(**UpperCamelCase__ ) _snake_case = outputs.logits _snake_case = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one _snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == 
"videomae-large-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": _snake_case = torch.Size([1, 400] ) _snake_case = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": _snake_case = torch.Size([1, 1_408, 1_536] ) _snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": _snake_case = torch.Size([1, 174] ) _snake_case = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": _snake_case = outputs.loss assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(UpperCamelCase__ , organization='nielsr' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase_ = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
295
0
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        # XOR adds without carrying; AND picks out the carry bits,
        # which are shifted left and folded in on the next iteration.
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
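if __name__ == "__main__":
    # Sanity check against the built-in operator. Note the adder assumes
    # non-negative inputs: Python ints are unbounded, so a negative operand
    # would keep shifting the carry left forever.
    for a in range(50):
        for b in range(50):
            assert add(a, b) == a + b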
354
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , 
config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
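# A stripped-down sketch of the residual pattern the basic and bottleneck layers
# above implement: output = activation(body(x) + shortcut(x)), where the shortcut
# becomes a strided 1x1 convolution whenever the channel count or spatial size
# changes. Class name and channel sizes here are illustrative, not from the model.
import torch
from torch import nn


class TinyResidualBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
        )
        # identity shortcut unless the shape changes
        self.shortcut = (
            nn.Conv2d(in_channels, out_channels, 1, stride=stride)
            if in_channels != out_channels or stride != 1
            else nn.Identity()
        )
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.body(x) + self.shortcut(x))


x = torch.randn(1, 8, 16, 16)
print(TinyResidualBlock(8, 16, stride=2)(x).shape)  # torch.Size([1, 16, 8, 8])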
295
0
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Dict: super().__init__() _snake_case = nn.Linear(3 , 4 ) _snake_case = nn.BatchNormad(4 ) _snake_case = nn.Linear(4 , 5 ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase_ ) ) ) class UpperCamelCase_ ( _lowerCamelCase ): def lowerCAmelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase_ ( _lowerCamelCase ): def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: return output + 1 class UpperCamelCase_ ( unittest.TestCase ): def lowerCAmelCase ( self ) -> List[Any]: _snake_case = ModelForTest() _snake_case = ModelHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(test_model._hf_hook , lowerCAmelCase_ ) self.assertTrue(hasattr(lowerCAmelCase_ , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase_ ) self.assertFalse(hasattr(lowerCAmelCase_ , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase_ , '_old_forward' ) ) def lowerCAmelCase ( self ) -> Dict: _snake_case = ModelForTest() _snake_case = ModelHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ , append=lowerCAmelCase_ ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase_ , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase_ ) self.assertFalse(hasattr(lowerCAmelCase_ , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase_ , '_old_forward' ) ) def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = ModelForTest() _snake_case = torch.randn(2 , 3 ) _snake_case = test_model(x + 1 ) _snake_case = test_model(x + 2 ) _snake_case = PreForwardHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _snake_case = PreForwardHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _snake_case = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-5 ) def lowerCAmelCase ( self ) -> Any: _snake_case = ModelForTest() _snake_case = torch.randn(2 , 3 ) _snake_case = test_model(lowerCAmelCase_ ) 
_snake_case = PostForwardHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) self.assertTrue(torch.allclose(lowerCAmelCase_ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _snake_case = PostForwardHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) self.assertTrue(torch.allclose(lowerCAmelCase_ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _snake_case = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) assert torch.allclose(lowerCAmelCase_ , output + 2 , atol=1E-5 ) def lowerCAmelCase ( self ) -> int: _snake_case = ModelForTest() _snake_case = torch.randn(2 , 3 ) _snake_case = test_model(lowerCAmelCase_ ) _snake_case = PostForwardHook() add_hook_to_module(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = test_model(lowerCAmelCase_ ) self.assertTrue(torch.allclose(lowerCAmelCase_ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _snake_case = True _snake_case = test_model(lowerCAmelCase_ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def lowerCAmelCase ( self ) -> List[str]: _snake_case = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase_ , AlignDevicesHook(io_same_device=lowerCAmelCase_ ) ) _snake_case = torch.randn(2 , 3 ).to(0 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , torch.device(0 ) ) def lowerCAmelCase ( self ) -> Any: _snake_case = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _snake_case = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase_ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase_ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase_ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _snake_case = torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase_ ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload _snake_case = { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase_ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase_ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase_ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def lowerCAmelCase ( self ) -> Any: _snake_case = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _snake_case = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase_ , execution_device=lowerCAmelCase_ , offload=lowerCAmelCase_ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _snake_case = torch.device(lowerCAmelCase_ ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase_ ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase_ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase_ , execution_device=lowerCAmelCase_ , offload=lowerCAmelCase_ , offload_buffers=lowerCAmelCase_ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase_ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices _snake_case = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase_ , execution_device=lowerCAmelCase_ , offload=lowerCAmelCase_ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device _snake_case = torch.device(lowerCAmelCase_ ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase_ ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase_ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase_ , execution_device=lowerCAmelCase_ , offload=lowerCAmelCase_ , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase_ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) _snake_case = torch.randn(2 , 3 ) _snake_case = model(lowerCAmelCase_ ) self.assertEqual(output.device , lowerCAmelCase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase_ ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
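# A minimal sketch of the mechanism these tests exercise, using the public
# accelerate API: a ModelHook's post_forward can rewrite a module's output after
# the wrapped forward runs. The hook class below is a made-up example.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module


class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        return output * 2


layer = nn.Linear(3, 3)
reference = layer(torch.ones(1, 3))
add_hook_to_module(layer, ScaleOutputHook())
assert torch.allclose(layer(torch.ones(1, 3)), reference * 2)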
355
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, plus one extra slot to close the cycle
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and, if we find an answer, return the path; otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
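if __name__ == "__main__":
    # Usage sketch: a 5-vertex undirected graph as an adjacency matrix. The
    # printed cycle starts and ends at vertex 0 (0 -> 1 -> 2 -> 4 -> 3 -> 0).
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]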
295
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase_ = { """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["""VivitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """VivitModel""", """VivitPreTrainedModel""", """VivitForVideoClassification""", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
356
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
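# Usage sketch: once converted, the saved directory loads like any diffusers
# pipeline. The path below is a placeholder for whatever --output_path was used.
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./ldm-pipeline")
image = pipe(num_inference_steps=50).images[0]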
295
0
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: maximise total value, allowing item fractions."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # consider items in decreasing value-to-weight ratio
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
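if __name__ == "__main__":
    # Worked example: values (60, 100, 120), weights (10, 20, 30), capacity 50.
    # The greedy ratio order takes items 0 and 1 whole plus 2/3 of item 2,
    # for 60 + 100 + 120 * (20 / 30) = 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value)  # 240.0
    print(fractions)  # [1, 1, 0.6666666666666666]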
357
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase_ : @property def lowerCAmelCase ( self ) -> int: return self.get_dummy_input() @property def lowerCAmelCase ( self ) -> Optional[Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]: _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ) return dummy_input def lowerCAmelCase ( self ) -> Tuple: _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) unet_block.to(lowerCAmelCase_ ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def lowerCAmelCase ( self ) -> Tuple: _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() _snake_case = model(**lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = output[0] _snake_case = torch.device(lowerCAmelCase_ ) _snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ ) _snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward()
295
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: UpperCAmelCase_ = None UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase_ = """▁""" class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = AlbertTokenizer def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , **lowerCAmelCase_ , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
_snake_case = ( AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token ) super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) _snake_case = do_lower_case _snake_case = remove_space _snake_case = keep_accents _snake_case = vocab_file _snake_case = False if not self.vocab_file else True def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
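# Usage sketch for the fast tokenizer defined above (it mirrors transformers'
# AlbertTokenizerFast; the checkpoint name comes from the pretrained map above):
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello world")
# input_ids follow the cls + tokens + sep layout built by the methods above
print(encoded["input_ids"])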
358
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
295
0
import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor UpperCAmelCase_ = random.Random() def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=1.0 , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[Any]=None ) -> Any: '''simple docstring''' if rng is None: _snake_case = global_rng _snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class UpperCamelCase_ ( unittest.TestCase ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=400 , lowerCAmelCase_=2000 , lowerCAmelCase_=24 , lowerCAmelCase_=24 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1_6000 , lowerCAmelCase_=True , lowerCAmelCase_=True , ) -> Optional[Any]: _snake_case = parent _snake_case = batch_size _snake_case = min_seq_length _snake_case = max_seq_length _snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case = feature_size _snake_case = num_mel_bins _snake_case = padding_value _snake_case = sampling_rate _snake_case = return_attention_mask _snake_case = do_normalize def lowerCAmelCase ( self ) -> Dict: return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Optional[Any]: def _flatten(lowerCAmelCase_ ): return list(itertools.chain(*lowerCAmelCase_ ) ) if equal_length: _snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _snake_case = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = SpeechaTextFeatureExtractor if is_speech_available() else None def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = SpeechaTextFeatureExtractionTester(self ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]: self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) ) def lowerCAmelCase ( self ) -> Optional[Any]: # Tests that all call wrap to encode_plus and batch_encode_plus _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs] # Test feature size _snake_case = feature_extractor(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input _snake_case = 
feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features _snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # Test batched _snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features _snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. _snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)] _snake_case = np.asarray(lowerCAmelCase_ ) _snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features _snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = ['longest', 'max_length', 'do_not_pad'] _snake_case = [None, 16, None] for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = feature_extractor( lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = ['longest', 'max_length', 'do_not_pad'] _snake_case = [None, 16, None] for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = feature_extractor( lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( lowerCAmelCase_ , padding='max_length' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , axis=1 ) 
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowerCAmelCase ( self ) -> str: _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( lowerCAmelCase_ , padding='longest' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( lowerCAmelCase_ , padding='longest' , max_length=16 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowerCAmelCase ( self ) -> Any: import torch _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = np.random.rand(100 , 32 ).astype(np.floataa ) _snake_case = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _snake_case = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _snake_case = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: from datasets import load_dataset _snake_case = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _snake_case = ds.sort('id' ).select(range(lowerCAmelCase_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowerCAmelCase ( self ) -> Any: # fmt: off _snake_case = np.array([ -1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41, -1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28, -1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25, ] ) # fmt: on _snake_case = self._load_datasamples(1 ) _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='pt' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCAmelCase_ , atol=1E-4 ) )
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of any contiguous subarray of `arr` (Kadane's algorithm)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
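# Quick illustration of the `allow_empty_subarrays` flag: for an all-negative input the
# best non-empty subarray is the largest single element, while the empty subarray
# (sum 0) wins once empty subarrays are allowed.
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # the classic [4, -1, 2, 1] window
assert max_subarray_sum([-3, -1, -2]) == -1                    # best single element
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0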
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = '''ibert''' def __init__( self , lowerCAmelCase_=3_0522 , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=3072 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=False , lowerCAmelCase_="none" , **lowerCAmelCase_ , ) -> Tuple: super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = position_embedding_type _snake_case = quant_mode _snake_case = force_dequant class UpperCamelCase_ ( _lowerCamelCase ): @property def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _snake_case = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
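# A minimal usage sketch for the configuration class above, assuming the public
# `transformers.IBertConfig` export; the quantization arguments mirror the
# `quant_mode` / `force_dequant` parameters of `__init__`:
from transformers import IBertConfig

config = IBertConfig(
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    quant_mode=True,       # enable the integer-only forward pass
    force_dequant="gelu",  # keep the GELU nonlinearity in floating point
)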
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class UpperCamelCase_ ( enum.Enum ): lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 2 @add_end_docstrings(_lowerCamelCase ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _snake_case = None if self.model.config.prefix is not None: _snake_case = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _snake_case = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params ) _snake_case = {**self._preprocess_params, **preprocess_params} _snake_case = {**self._forward_params, **forward_params} def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple: _snake_case = {} if prefix is not None: _snake_case = prefix if prefix: _snake_case = self.tokenizer( lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']' ) _snake_case = handle_long_generation preprocess_params.update(lowerCAmelCase_ ) _snake_case = generate_kwargs _snake_case = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) _snake_case = ReturnType.TENSORS if return_type is not None: _snake_case = return_type if clean_up_tokenization_spaces is not None: _snake_case = clean_up_tokenization_spaces if stop_sequence is not None: _snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) _snake_case = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any: _snake_case = self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework ) _snake_case = prompt_text if handle_long_generation == "hole": _snake_case = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _snake_case = generate_kwargs['max_new_tokens'] else: _snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _snake_case = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) _snake_case = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _snake_case = inputs['attention_mask'][:, -keep_length:] return inputs def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: _snake_case = model_inputs['input_ids'] _snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ ) # Allow empty prompts if input_ids.shape[1] == 0: _snake_case = None _snake_case = None _snake_case = 1 else: _snake_case = input_ids.shape[0] _snake_case = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_snake_case = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: _snake_case = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _snake_case = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case = generated_sequence.shape[0] if self.framework == "pt": _snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int: _snake_case = model_outputs['generated_sequence'][0] _snake_case = model_outputs['input_ids'] _snake_case = model_outputs['prompt_text'] _snake_case = generated_sequence.numpy().tolist() _snake_case = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _snake_case = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _snake_case = self.tokenizer.decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _snake_case = 0 else: _snake_case = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) ) if return_type == ReturnType.FULL_TEXT: _snake_case = prompt_text + text[prompt_length:] else: _snake_case = text[prompt_length:] _snake_case = {'generated_text': all_text} records.append(lowerCAmelCase_ ) return records
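# A short usage sketch for the text-generation pipeline implemented above, assuming the
# standard `transformers.pipeline` factory and the publicly available `gpt2` checkpoint:
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of Russian Tsar",
    max_new_tokens=20,
    return_full_text=False,  # ReturnType.NEW_TEXT: strip the prompt from the output
)
print(outputs[0]["generated_text"])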
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' if os.path.exists(UpperCamelCase__ ): if os.path.exists(os.path.join(UpperCamelCase__ , 'config.json' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'config.json' ) ): os.remove(os.path.join(UpperCamelCase__ , 'config.json' ) ) if os.path.exists(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ): os.remove(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) else: os.makedirs(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : str=False ) -> List[Any]: '''simple docstring''' _snake_case = 2 if unlogit: _snake_case = torch.pow(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = p * torch.log(UpperCamelCase__ ) _snake_case = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> str: '''simple docstring''' logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(UpperCamelCase__ ) ) ) ) for row in range(len(UpperCamelCase__ ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Union[str, Any]=False ) -> Optional[Any]: '''simple docstring''' _snake_case , _snake_case = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) _snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) if head_mask is None: _snake_case = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCamelCase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case = None _snake_case = 0.0 _snake_case = 0.0 for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): _snake_case = tuple(t.to(args.device ) for t in inputs ) ((_snake_case ) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCamelCase__ ): _snake_case = entropy(attn.detach() , UpperCamelCase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance 
+= head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case = 2 _snake_case = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(UpperCamelCase__ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(UpperCamelCase__ ) logger.info('Head ranked by importance scores' ) _snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case = torch.arange( head_importance.numel() , device=args.device ) _snake_case = head_ranks.view_as(UpperCamelCase__ ) print_ad_tensor(UpperCamelCase__ ) return attn_entropy, head_importance, total_loss def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> int: '''simple docstring''' _snake_case , _snake_case , _snake_case = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ ) _snake_case = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , UpperCamelCase__ , original_score * args.masking_threshold ) _snake_case = torch.ones_like(UpperCamelCase__ ) _snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case = original_score while current_score >= original_score * args.masking_threshold: _snake_case = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case = float('Inf' ) _snake_case = head_importance.view(-1 ).sort()[1] if len(UpperCamelCase__ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads _snake_case = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) _snake_case = new_head_mask.view(-1 ) _snake_case = 0.0 _snake_case = new_head_mask.view_as(UpperCamelCase__ ) _snake_case = new_head_mask.clone().detach() print_ad_tensor(UpperCamelCase__ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _snake_case = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('Final head mask' ) print_ad_tensor(UpperCamelCase__ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> int: '''simple docstring''' _snake_case = datetime.now() _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _snake_case = 1 / loss _snake_case = 
datetime.now() - before_time _snake_case = sum(p.numel() for p in model.parameters() ) _snake_case = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = [ v, ] assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCamelCase__ ) _snake_case = sum(p.numel() for p in model.parameters() ) _snake_case = datetime.now() _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , ) _snake_case = 1 / loss _snake_case = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCamelCase__ , UpperCamelCase__ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(UpperCamelCase__ , args.output_dir ) def lowerCamelCase__ ( ) -> Optional[Any]: '''simple docstring''' _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=UpperCamelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=UpperCamelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=UpperCamelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' 
) parser.add_argument( '--masking_threshold' , default=0.9 , type=UpperCamelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=UpperCamelCase__ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=UpperCamelCase__ , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCamelCase__ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=UpperCamelCase__ , help='Batch size.' ) parser.add_argument('--seed' , type=UpperCamelCase__ , default=42 ) parser.add_argument('--local_rank' , type=UpperCamelCase__ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' ) _snake_case = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) _snake_case = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case = torch.device('cuda' , args.local_rank ) _snake_case = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case = nn.parallel.DistributedDataParallel( UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ ) elif args.n_gpu > 1: _snake_case = nn.DataParallel(UpperCamelCase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , UpperCamelCase__ ) # Prepare dataset _snake_case = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case = (torch.from_numpy(UpperCamelCase__ ),) _snake_case = TensorDataset(*UpperCamelCase__ ) _snake_case = RandomSampler(UpperCamelCase__ ) _snake_case = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and 
head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
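# A readable sketch of the obfuscated `entropy` helper used by the script above, assuming
# the standard masked form -sum(p * log p) over the last dimension, with 0 * log(0)
# treated as 0 (`unlogit=True` squares the inputs first, matching the exponent-2 branch):
import torch

def entropy(p, unlogit=False):
    if unlogit:
        p = torch.pow(p, 2)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) as 0 instead of NaN
    return -plogp.sum(dim=-1)

print(entropy(torch.full((4,), 0.25)))               # log(4) ~= 1.386: maximally spread attention
print(entropy(torch.tensor([1.0, 0.0, 0.0, 0.0])))   # 0.0: a fully confident head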
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
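# GLEU, as described in the docstring above, is the minimum of n-gram precision and
# recall. A toy hand computation with unigrams only (min_len=max_len=1); the values are
# illustrative:
from collections import Counter

hyp = ["the", "cat", "sat"]
ref = ["the", "cat", "sat", "down"]
overlap = sum((Counter(hyp) & Counter(ref)).values())  # 3 matching unigrams
precision = overlap / len(hyp)  # 3 / 3 = 1.00
recall = overlap / len(ref)     # 3 / 4 = 0.75
print(min(precision, recall))   # 0.75 = unigram GLEU for this pair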