Dataset schema (one row per sample pair):
- code: string (length 81 to 54k characters)
- code_codestyle: int64 (range 0 to 721)
- style_context: string (length 91 to 41.9k characters)
- style_context_codestyle: int64 (range 0 to 699)
- label: int64 (0 or 1)
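Each row pairs a `code` sample with a `style_context` sample, each tagged with an integer style id, plus a binary `label`. A minimal sketch of loading rows like these with the `datasets` library; the dataset id below is a placeholder, not the real repository name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs")  # hypothetical dataset id
row = ds["train"][0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])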
code:

def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor d for which the smallest repunit divisible by d has more than `limit` digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 719
style_context:

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
style_context_codestyle: 71
label: 0
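A quick sanity check of the repunit helper in the `code` sample above: 111111 = 7 × 15873, so divisor 7 should map to index 6, and Project Euler 129 states the answer for limit 10 is 17 (example usage, not part of the original sample):

assert least_divisible_repunit(7) == 6  # 111111 is the smallest repunit divisible by 7
assert solution(10) == 17  # smallest odd d whose repunit index first exceeds 10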
code:

from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL Image by shifting every channel value by `level`."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
code_codestyle: 720
style_context:

from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
style_context_codestyle: 71
label: 0
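The `brightness` helper in the row above reduces to a uniform shift, since 128 + level + (c - 128) == c + level. A minimal check, assuming Pillow is installed and using the `change_brightness` function as reconstructed above:

img = Image.new("L", (1, 1), color=100)  # single gray pixel
assert change_brightness(img, 50).getpixel((0, 0)) == 150  # shifted by exactly `level`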
code:

from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 721
style_context:

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training=False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training=False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
style_context_codestyle: 71
label: 0
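Example usage of the explicit Euler sample above: for y' = y with y(0) = 1, the method should approach e ≈ 2.71828 at x = 1 as the step shrinks (the remaining gap reflects the method's first-order error):

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ≈ 2.7048 with step 0.01; halving the step roughly halves the error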
code:

"""Convert a fairseq Speech2Text checkpoint into a transformers checkpoint."""
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
code_codestyle: 700
style_context:

from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
style_context_codestyle: 71
label: 0
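The conversion script in the `code` field above is driven from the command line; a hypothetical invocation (the script filename and both paths are placeholders):

python convert_s2t_checkpoint.py \
    --fairseq_path ./s2t_checkpoint.pt \
    --pytorch_dump_folder_path ./s2t_transformers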
code:

import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
code_codestyle: 701
style_context:

import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
style_context_codestyle: 71
label: 0
code:

# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
code_codestyle: 702
style_context:

from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
style_context_codestyle: 71
label: 0
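Cross-checking the demo queries at the bottom of the segment-tree sample against a brute-force maximum (a 1-based inclusive query [a, b] maps to the Python slice a-1:b):

A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
assert max(A[3:6]) == 7    # query(1, 1, size, 4, 6)
assert max(A[6:11]) == 14  # query(1, 1, size, 7, 11)
assert max(A[6:12]) == 15  # query(1, 1, size, 7, 12)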
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 703
style_context:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 71
label: 0
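Both `__init__.py` samples in the row above use the same `_LazyModule` pattern: the import structure is declared up front, and the heavy submodule import only happens on first attribute access. A hedged usage sketch:

import transformers

# The line below does not import torch-heavy modeling code at `import transformers` time;
# the configuration submodule is loaded lazily on first attribute access.
config = transformers.YolosConfig()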
code:

import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
code_codestyle: 704
style_context:

import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
style_context_codestyle: 71
label: 0
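A hedged usage sketch for the conversational pipeline defined in the `code` field of the row above; the checkpoint name is just a commonly used example, not something the sample itself prescribes:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")  # example checkpoint
conversation = Conversation("What's a good way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])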
code:

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
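# Usage note (hedged): this is the transformers text-generation pipeline; a typical
# call shape, commented out to avoid a model download:
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I am", max_new_tokens=20)[0]["generated_text"])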
71
0
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _lowerCAmelCase = """true""" def _snake_case ( __snake_case , __snake_case=82 , __snake_case=16 ): set_seed(42 ) _UpperCamelCase = RegressionModel() _UpperCamelCase = deepcopy(__snake_case ) _UpperCamelCase = RegressionDataset(length=__snake_case ) _UpperCamelCase = DataLoader(__snake_case , batch_size=__snake_case ) model.to(accelerator.device ) _UpperCamelCase , _UpperCamelCase = accelerator.prepare(__snake_case , __snake_case ) return model, ddp_model, dataloader def _snake_case ( __snake_case , __snake_case=False ): _UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) _UpperCamelCase = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(__snake_case ): _UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs with accelerator.main_process_first(): _UpperCamelCase = dataset.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) _UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case ): if use_longest: return tokenizer.pad(__snake_case , padding='''longest''' , return_tensors='''pt''' ) return tokenizer.pad(__snake_case , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return DataLoader(__snake_case , shuffle=__snake_case , collate_fn=__snake_case , batch_size=16 ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = Accelerator(dispatch_batches=__snake_case , split_batches=__snake_case ) _UpperCamelCase = get_dataloader(__snake_case , not dispatch_batches ) _UpperCamelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__snake_case ) _UpperCamelCase , _UpperCamelCase = accelerator.prepare(__snake_case , __snake_case ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case ( __snake_case , __snake_case , __snake_case ): _UpperCamelCase = [] for batch in dataloader: _UpperCamelCase , _UpperCamelCase = batch.values() with torch.no_grad(): _UpperCamelCase = model(__snake_case ) _UpperCamelCase , _UpperCamelCase = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) _UpperCamelCase , _UpperCamelCase = [], [] for logit, targ in logits_and_targets: logits.append(__snake_case ) targs.append(__snake_case ) _UpperCamelCase , _UpperCamelCase = torch.cat(__snake_case ), torch.cat(__snake_case ) return logits, targs def _snake_case ( __snake_case , __snake_case=82 , __snake_case=False , __snake_case=False , __snake_case=16 ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_basic_setup(__snake_case , __snake_case , __snake_case ) _UpperCamelCase , _UpperCamelCase = generate_predictions(__snake_case , __snake_case , __snake_case ) assert ( len(__snake_case ) == num_samples ), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__snake_case )}""" def _snake_case ( 
__snake_case = False , __snake_case = False ): _UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' ) _UpperCamelCase , _UpperCamelCase = get_mrpc_setup(__snake_case , __snake_case ) # First do baseline _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = setup['''no'''] model.to(__snake_case ) model.eval() for batch in dataloader: batch.to(__snake_case ) with torch.inference_mode(): _UpperCamelCase = model(**__snake_case ) _UpperCamelCase = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__snake_case , references=batch['''labels'''] ) _UpperCamelCase = metric.compute() # Then do distributed _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): _UpperCamelCase = model(**__snake_case ) _UpperCamelCase = outputs.logits.argmax(dim=-1 ) _UpperCamelCase = batch['''labels'''] _UpperCamelCase , _UpperCamelCase = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__snake_case , references=__snake_case ) _UpperCamelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def _snake_case ( ): _UpperCamelCase = Accelerator(split_batches=__snake_case , dispatch_batches=__snake_case ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(__snake_case , __snake_case ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: _UpperCamelCase = Accelerator(split_batches=__snake_case , dispatch_batches=__snake_case ) if accelerator.is_local_main_process: print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(__snake_case , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) _UpperCamelCase = Accelerator() test_torch_metrics(__snake_case , 512 ) accelerator.state._reset_state() def _snake_case ( __snake_case ): main() if __name__ == "__main__": main()
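# Note (hedged): `accelerator.gather_for_metrics(...)` used above is the accelerate
# API that gathers per-process tensors while dropping the duplicate samples a
# distributed sampler pads in, which is why the length assertion against
# `num_samples` holds even when the dataset size does not divide evenly.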
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
    @slow
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
        _UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        _UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        _UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
        _UpperCamelCase = -(labels.shape[-1] * loss.item())
        _UpperCamelCase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
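# Illustrative restatement (self-contained; the tensor below is a made-up stand-in,
# not model output): the test above turns the per-token cross-entropy loss into a
# sequence-level log-likelihood by scaling with the target length.
import torch

dummy_loss = torch.tensor(2.5)  # stand-in for model(...).loss
num_target_tokens = 4           # stand-in for labels.shape[-1]
score = -(num_target_tokens * dummy_loss.item())
assert score == -10.0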
71
0
from math import pow


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    _UpperCamelCase = int(pow(lowercase_ , lowercase_ ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        _UpperCamelCase = backtrack(
            lowercase_ , lowercase_ , current_number + 1 , lowercase_ , lowercase_ )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        _UpperCamelCase = backtrack(
            lowercase_ , lowercase_ , current_number + 1 , lowercase_ , lowercase_ )
    return current_sum, solutions_count


def _snake_case ( __snake_case , __snake_case ):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )

    return backtrack(lowercase_ , lowercase_ , 1 , 0 , 0 )[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
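# Hedged restatement (identifiers below are mine, not from the row above): the same
# backtracking idea, counting the ways to write needed_sum as a sum of distinct
# natural-number powers.
def count_power_sums(needed_sum: int, power: int, start: int = 1, total: int = 0) -> int:
    if total == needed_sum:
        return 1
    term = start**power
    count = 0
    if total + term <= needed_sum:
        # take start**power and move to the next base
        count += count_power_sums(needed_sum, power, start + 1, total + term)
    if term < needed_sum:
        # skip start**power and try the next base
        count += count_power_sums(needed_sum, power, start + 1, total)
    return count


assert count_power_sums(13, 2) == 1  # 13 = 2**2 + 3**2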
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
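# Usage note (hedged; the script name and data layout below are assumptions, while
# the flags come from the dataclasses above):
#
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --do_train --do_eval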
71
0
from cva import destroyAllWindows, imread, imshow, waitKey


def _snake_case ( __snake_case ):
    # get the image dimensions (rows, cols)
    _UpperCamelCase , _UpperCamelCase = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(__UpperCamelCase ):
        for j in range(__UpperCamelCase ):
            _UpperCamelCase = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    _lowerCAmelCase = imread("image_data/lena.jpg", 1)

    # convert to its negative
    _lowerCAmelCase = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
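# Minimal self-contained check of the per-pixel negation above (a numpy array
# stands in for an OpenCV image, so no image file is needed):
import numpy as np

pixel = np.array([10, 200, 255], dtype=np.uint8)
negative = np.array([255, 255, 255], dtype=np.uint8) - pixel
assert negative.tolist() == [245, 55, 0]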
708
from __future__ import annotations

import typing
from collections import Counter


def _snake_case ( __snake_case ):
    _UpperCamelCase = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(__snake_case , max_perimeter + 1 ):
            _UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(__snake_case ):
                _UpperCamelCase = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def _snake_case ( __snake_case = 1000 ):
    _UpperCamelCase = pythagorean_triple(__snake_case )
    return triplets.most_common(1 )[0][0]


if __name__ == "__main__":
    print(f'Perimeter {solution()} has maximum solutions')
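# Hedged restatement (identifiers are mine): the perimeter up to max_perimeter with
# the most integer right-triangle solutions, mirroring the counting above.
from collections import Counter


def most_solved_perimeter(max_perimeter: int) -> int:
    triplets: Counter = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = base + perpendicular + int(hypotenuse)
                if perimeter <= max_perimeter:
                    triplets[perimeter] += 1
    return triplets.most_common(1)[0][0]


assert most_solved_perimeter(120) == 120  # (30, 40, 50), (20, 48, 52), (24, 45, 51)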
71
0
import qiskit


def _snake_case ( __snake_case , __snake_case ):
    _UpperCamelCase = qiskit.Aer.get_backend('''aer_simulator''' )

    # Create a Quantum Circuit acting on the q register
    _UpperCamelCase = qiskit.QuantumCircuit(__snake_case , __snake_case )

    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )

    # Execute the circuit on the simulator
    _UpperCamelCase = qiskit.execute(__snake_case , __snake_case , shots=1000 )

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(__snake_case )


if __name__ == "__main__":
    print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
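# Usage note (hedged; assumes qiskit with the legacy qiskit.execute API shown
# above): the circuit applies no gates before measuring, so all 1000 shots land on
# the '0' state and the printed counts look like {'0': 1000}.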
709
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DPMSolverSDEScheduler,) UpperCAmelCase = 10 def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ): _UpperCamelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_A ) return config def UpperCamelCase_ ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : List[Any] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCamelCase_ ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = 
self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
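# Note (hedged): the loop shape exercised above is the standard diffusers sampling
# pattern -- scheduler.scale_model_input -> model -> scheduler.step -> prev_sample --
# and the expected sums/means branch per device because the SDE noise sampling is
# device-dependent.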
71
0
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
_lowerCAmelCase = Lock()


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(snake_case_ )
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            _UpperCamelCase = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            _UpperCamelCase = min(snake_case_ , snake_case_ )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(snake_case_ )
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            _UpperCamelCase = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            _UpperCamelCase = max(snake_case_ , snake_case_ )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(snake_case_ )


def _snake_case ( __snake_case ):
    _UpperCamelCase = []
    _UpperCamelCase = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    _UpperCamelCase = Pipe()
    _UpperCamelCase = Pipe()
    process_array_.append(
        Process(
            target=snake_case_ ,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) ,
        ) )
    _UpperCamelCase = temp_rs
    _UpperCamelCase = temp_rr

    for i in range(1 , len(snake_case_ ) - 1 ):
        _UpperCamelCase = Pipe()
        _UpperCamelCase = Pipe()
        process_array_.append(
            Process(
                target=snake_case_ ,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) ,
            ) )
        _UpperCamelCase = temp_rs
        _UpperCamelCase = temp_rr

    process_array_.append(
        Process(
            target=snake_case_ ,
            args=(
                len(snake_case_ ) - 1,
                arr[len(snake_case_ ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(snake_case_ ) - 1],
            ) ,
        ) )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0 , len(snake_case_ ) ):
        _UpperCamelCase = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def _snake_case ( ):
    _UpperCamelCase = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*snake_case_ )
    _UpperCamelCase = odd_even_transposition(snake_case_ )
    print('''Sorted List\n''' )
    print(*snake_case_ )


if __name__ == "__main__":
    main()
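# Hedged single-process restatement (no pipes or locks; names are mine) of the
# odd-even transposition schedule the processes above follow:
def odd_even_sort(values: list) -> list:
    arr = list(values)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                # neighbors exchange values; the left keeps the min, the right the max
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_sort([5, 3, 4, 1, 2]) == [1, 2, 3, 4, 5]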
710
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
71
0
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _lowerCAmelCase = { # 1536-bit 5: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + 
"8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + 
"22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, } class lowerCAmelCase_ : def __init__( self : Union[str, Any] , _A : Union[str, Any] = 14 ): if group not in primes: raise ValueError('''Unsupported Group''' ) _UpperCamelCase = primes[group]['''prime'''] _UpperCamelCase = primes[group]['''generator'''] _UpperCamelCase = int(hexlify(urandom(32 ) ) , base=16 ) def UpperCamelCase_ ( self : List[Any] ): return hex(self.__private_key )[2:] def UpperCamelCase_ ( self : str ): _UpperCamelCase = pow(self.generator , self.__private_key , self.prime ) return hex(UpperCamelCase__ )[2:] def UpperCamelCase_ ( self : Optional[int] , _A : Dict ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(UpperCamelCase__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCamelCase_ ( self : Optional[Any] , _A : Optional[int] ): _UpperCamelCase = int(UpperCamelCase__ , base=16 ) if not self.is_valid_public_key(UpperCamelCase__ ): raise ValueError('''Invalid public key''' ) _UpperCamelCase = pow(UpperCamelCase__ , self.__private_key , self.prime ) return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest() @staticmethod def UpperCamelCase_ ( _A : Dict , _A : List[Any] ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(UpperCamelCase__ , (prime - 1) // 2 , UpperCamelCase__ ) == 1 ) @staticmethod def UpperCamelCase_ ( _A : List[str] , _A : Dict , _A : Tuple = 14 ): _UpperCamelCase = int(UpperCamelCase__ , base=16 ) _UpperCamelCase = int(UpperCamelCase__ , base=16 ) _UpperCamelCase = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('''Invalid public key''' ) _UpperCamelCase = pow(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
711
def _snake_case ( __snake_case ):
    if not isinstance(__snake_case , __snake_case ):
        raise TypeError('''Input value must be an \'int\' type''' )

    _UpperCamelCase = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
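# Note (hedged): the loop above counts right-shifts until the value reaches zero,
# i.e. the 1-based index of the highest set bit, matching int.bit_length:
assert (37).bit_length() == 6  # 37 = 0b100101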
71
0
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase_ ( lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase ): UpperCAmelCase = AutoencoderKL UpperCAmelCase = "sample" UpperCAmelCase = 1e-2 @property def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = 4 _UpperCamelCase = 3 _UpperCamelCase = (32, 32) _UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase ) return {"sample": image} @property def UpperCamelCase_ ( self : Union[str, Any] ): return (3, 32, 32) @property def UpperCamelCase_ ( self : List[str] ): return (3, 32, 32) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : List[str] ): pass def UpperCamelCase_ ( self : str ): pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.model_class(**_lowerCamelCase ) model.to(_lowerCamelCase ) assert not model.is_gradient_checkpointing and model.training _UpperCamelCase = model(**_lowerCamelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _UpperCamelCase = torch.randn_like(_lowerCamelCase ) _UpperCamelCase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _UpperCamelCase = self.model_class(**_lowerCamelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_lowerCamelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _UpperCamelCase = model_a(**_lowerCamelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        _UpperCamelCase = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        _UpperCamelCase = dict(model.named_parameters() )
        _UpperCamelCase = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )

    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase , _UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(_lowerCamelCase )
        _UpperCamelCase = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
        _UpperCamelCase = model.to(_lowerCamelCase )
        model.eval()

        if torch_device == "mps":
            _UpperCamelCase = torch.manual_seed(0 )
        else:
            _UpperCamelCase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )

        _UpperCamelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        _UpperCamelCase = image.to(_lowerCamelCase )
        with torch.no_grad():
            _UpperCamelCase = model(_lowerCamelCase , sample_posterior=_lowerCamelCase , generator=_lowerCamelCase ).sample

        _UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            _UpperCamelCase = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ] )
        elif torch_device == "cpu":
            _UpperCamelCase = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            _UpperCamelCase = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(_lowerCamelCase , _lowerCamelCase , rtol=1e-2 ) )


@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : Tuple , _A : Optional[Any] , _A : List[Any] ):
        return F"""gaussian_noise_s={seed}_shape={"_".join([str(_lowerCamelCase ) for s in shape] )}.npy"""

    def UpperCamelCase_ ( self : Dict ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self : Optional[Any] , _A : Optional[Any]=0 , _A : List[Any]=(4, 3, 512, 512) , _A : Dict=False ):
        _UpperCamelCase = torch.floataa if fpaa else torch.floataa
        _UpperCamelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) ).to(_lowerCamelCase ).to(_lowerCamelCase )
        return image

    def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any]="CompVis/stable-diffusion-v1-4" , _A : Any=False ):
        _UpperCamelCase = '''fp16''' if fpaa else None
        _UpperCamelCase = torch.floataa if fpaa else torch.floataa
        _UpperCamelCase = AutoencoderKL.from_pretrained(
            _lowerCamelCase , subfolder='''vae''' , torch_dtype=_lowerCamelCase , revision=_lowerCamelCase , )
        model.to(_lowerCamelCase ).eval()
        return model

    def UpperCamelCase_ ( self : List[Any] , _A : Optional[Any]=0 ):
        if torch_device == "mps":
            return torch.manual_seed(_lowerCamelCase )
        return torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def UpperCamelCase_ ( self : int , _A : int , _A : Union[str, Any] , _A : Tuple ):
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase )
        _UpperCamelCase = self.get_generator(_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample

        assert sample.shape == image.shape

        _UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] ):
        _UpperCamelCase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase , fpaa=_lowerCamelCase )
        _UpperCamelCase = self.get_generator(_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model(_lowerCamelCase , generator=_lowerCamelCase , sample_posterior=_lowerCamelCase ).sample

        assert sample.shape == image.shape

        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(_lowerCamelCase )

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def UpperCamelCase_ ( self : List[Any] , _A : Tuple , _A : Optional[Any] , _A : Tuple ):
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model(_lowerCamelCase ).sample

        assert sample.shape == image.shape

        _UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=3e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase_ ( self : Any , _A : Optional[int] , _A : Tuple ):
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
        _UpperCamelCase = torch.tensor(_lowerCamelCase )

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Union[str, Any] ):
        _UpperCamelCase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(_lowerCamelCase )

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=5e-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def UpperCamelCase_ ( self : Tuple , _A : int ):
        _UpperCamelCase = self.get_sd_vae_model(fpaa=_lowerCamelCase )
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def UpperCamelCase_ ( self : Tuple , _A : List[str] ):
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _UpperCamelCase = model.decode(_lowerCamelCase ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def UpperCamelCase_ ( self : List[Any] , _A : str , _A : Optional[Any] ):
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(_lowerCamelCase )
        _UpperCamelCase = self.get_generator(_lowerCamelCase )

        with torch.no_grad():
            _UpperCamelCase = model.encode(_lowerCamelCase ).latent_dist
            _UpperCamelCase = dist.sample(generator=_lowerCamelCase )

        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        _UpperCamelCase = sample[0, -1, -3:, -3:].flatten().cpu()
        _UpperCamelCase = torch.tensor(_lowerCamelCase )

        _UpperCamelCase = 3e-3 if torch_device != '''mps''' else 1e-2
        assert torch_all_close(_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase )
712
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
71
0
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


_lowerCAmelCase = datasets.utils.logging.get_logger(__name__)

_lowerCAmelCase = ["names", "prefix"]
_lowerCAmelCase = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_lowerCAmelCase = ["encoding_errors", "on_bad_lines"]
_lowerCAmelCase = ["date_format"]


@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
    UpperCAmelCase = ","
    UpperCAmelCase = None
    UpperCAmelCase = "infer"
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = True
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = False
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = True
    UpperCAmelCase = True
    UpperCAmelCase = False
    UpperCAmelCase = True
    UpperCAmelCase = None
    UpperCAmelCase = "."
    UpperCAmelCase = None
    UpperCAmelCase = "\""
    UpperCAmelCase = 0
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = None
    UpperCAmelCase = True
    UpperCAmelCase = True
    UpperCAmelCase = 0
    UpperCAmelCase = True
    UpperCAmelCase = False
    UpperCAmelCase = None
    UpperCAmelCase = 10000
    UpperCAmelCase = None
    UpperCAmelCase = "strict"
    UpperCAmelCase = "error"
    UpperCAmelCase = None

    def UpperCamelCase_ ( self : Any ):
        if self.delimiter is not None:
            _UpperCamelCase = self.delimiter
        if self.column_names is not None:
            _UpperCamelCase = self.column_names

    @property
    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _A ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
    UpperCAmelCase = CsvConfig

    def UpperCamelCase_ ( self : Union[str, Any] ):
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCamelCase_ ( self : str , _A : str ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        _UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_A , (str, list, tuple) ):
            _UpperCamelCase = data_files
            if isinstance(_A , _A ):
                _UpperCamelCase = [files]
            _UpperCamelCase = [dl_manager.iter_files(_A ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        _UpperCamelCase = []
        for split_name, files in data_files.items():
            if isinstance(_A , _A ):
                _UpperCamelCase = [files]
            _UpperCamelCase = [dl_manager.iter_files(_A ) for file in files]
            splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict ):
        if self.config.features is not None:
            _UpperCamelCase = self.config.features.arrow_schema
            if all(not require_storage_cast(_A ) for feature in self.config.features.values() ):
                # cheaper cast
                _UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_A )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                _UpperCamelCase = table_cast(_A , _A )
        return pa_table

    def UpperCamelCase_ ( self : Tuple , _A : int ):
        _UpperCamelCase = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        _UpperCamelCase = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(_A ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
            _UpperCamelCase = pd.read_csv(_A , iterator=_A , dtype=_A , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(_A ):
                    _UpperCamelCase = pa.Table.from_pandas(_A )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(_A )
            except ValueError as e:
                logger.error(F"""Failed to read file \'{file}\' with error {type(_A )}: {e}""" )
                raise
713
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
71
0
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase_ ( __lowercase ):
    UpperCAmelCase = (DDIMParallelScheduler,)
    UpperCAmelCase = (('eta', 0.0), ('num_inference_steps', 50))

    def UpperCamelCase_ ( self : Optional[int] , **_A : List[str] ):
        _UpperCamelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }

        config.update(**__A )
        return config

    def UpperCamelCase_ ( self : Tuple , **_A : Tuple ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(**__A )
        _UpperCamelCase = scheduler_class(**__A )

        _UpperCamelCase , _UpperCamelCase = 10, 0.0

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter

        scheduler.set_timesteps(__A )

        for t in scheduler.timesteps:
            _UpperCamelCase = model(__A , __A )
            _UpperCamelCase = scheduler.step(__A , __A , __A , __A ).prev_sample

        return sample

    def UpperCamelCase_ ( self : Union[str, Any] ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=__A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__A )

        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(steps_offset=1 )
        _UpperCamelCase = scheduler_class(**__A )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    def UpperCamelCase_ ( self : str ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__A , beta_end=__A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__A )

    def UpperCamelCase_ ( self : Tuple ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__A )

    def UpperCamelCase_ ( self : List[Any] ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__A )

    def UpperCamelCase_ ( self : Any ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=__A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=__A )

    def UpperCamelCase_ ( self : Tuple ):
        self.check_over_configs(thresholding=__A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__A , prediction_type=__A , sample_max_value=__A , )

    def UpperCamelCase_ ( self : Optional[int] ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=__A )

    def UpperCamelCase_ ( self : Tuple ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=__A , num_inference_steps=__A )

    def UpperCamelCase_ ( self : Any ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=__A , eta=__A )

    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**__A )

        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**__A )

        _UpperCamelCase , _UpperCamelCase = 10, 0.0
        scheduler.set_timesteps(__A )

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter
        _UpperCamelCase = self.dummy_sample_deter + 0.1
        _UpperCamelCase = self.dummy_sample_deter - 0.1

        _UpperCamelCase = samplea.shape[0]
        _UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
        _UpperCamelCase = torch.arange(__A )[0:3, None].repeat(1 , __A )

        _UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        _UpperCamelCase = scheduler.batch_step_no_noise(__A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __A )

        _UpperCamelCase = torch.sum(torch.abs(__A ) )
        _UpperCamelCase = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.full_loop()

        _UpperCamelCase = torch.sum(torch.abs(__A ) )
        _UpperCamelCase = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1e-3

    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' )

        _UpperCamelCase = torch.sum(torch.abs(__A ) )
        _UpperCamelCase = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3

    def UpperCamelCase_ ( self : List[str] ):
        # We specify different beta, so that the first alpha is 0.99
        _UpperCamelCase = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )

        _UpperCamelCase = torch.sum(torch.abs(__A ) )
        _UpperCamelCase = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3

    def UpperCamelCase_ ( self : Optional[Any] ):
        # We specify different beta, so that the first alpha is 0.99
        _UpperCamelCase = self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )

        _UpperCamelCase = torch.sum(torch.abs(__A ) )
        _UpperCamelCase = torch.mean(torch.abs(__A ) )

        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
714
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    # Return True if there is a node that has not yet been visited.
    _UpperCamelCase = [False] * len(__snake_case )
    _UpperCamelCase = []
    queue.append(__snake_case )
    _UpperCamelCase = True

    while queue:
        _UpperCamelCase = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(__snake_case )
                _UpperCamelCase = True
                _UpperCamelCase = u

    return visited[t]


def _snake_case ( __snake_case , __snake_case , __snake_case ):
    # This array is filled by BFS and used to store the augmenting path.
    _UpperCamelCase = [-1] * (len(__snake_case ))
    _UpperCamelCase = 0

    while bfs(__snake_case , __snake_case , __snake_case , __snake_case ):
        _UpperCamelCase = float('''Inf''' )
        _UpperCamelCase = sink

        while s != source:
            # Find the minimum residual capacity along the selected path.
            _UpperCamelCase = min(__snake_case , graph[parent[s]][s] )
            _UpperCamelCase = parent[s]

        max_flow += path_flow
        _UpperCamelCase = sink

        while v != source:
            _UpperCamelCase = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            _UpperCamelCase = parent[v]

    return max_flow


_lowerCAmelCase = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

_lowerCAmelCase, _lowerCAmelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
71
0
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class lowerCAmelCase_ : def __init__( self : str , _A : List[Any] , _A : Any=14 , _A : Optional[Any]=7 , _A : Optional[int]=True , _A : List[Any]=True , _A : int=False , _A : str=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Union[str, Any]=4 , _A : List[Any]=4 , _A : int=4 , _A : List[str]=37 , _A : int="gelu" , _A : List[Any]=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : int=0.02 , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = rotary_dim _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = initializer_range _UpperCamelCase = None _UpperCamelCase = vocab_size - 1 _UpperCamelCase = vocab_size - 1 _UpperCamelCase = vocab_size - 1 def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def UpperCamelCase_ ( self : int , _A : int , _A : Any , _A : Any , _A : Optional[Any] ): _UpperCamelCase = 20 _UpperCamelCase = model_class_name(__A ) _UpperCamelCase = model.init_cache(input_ids.shape[0] , __A ) _UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) _UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) _UpperCamelCase = model( input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , 
) _UpperCamelCase = model(__A ) _UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" ) def UpperCamelCase_ ( self : Dict , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] ): _UpperCamelCase = 20 _UpperCamelCase = model_class_name(__A ) _UpperCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) _UpperCamelCase = model.init_cache(input_ids.shape[0] , __A ) _UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) _UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) _UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) _UpperCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , ) _UpperCamelCase = model(__A , attention_mask=__A ) _UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" ) @require_flax class lowerCAmelCase_ ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ): UpperCAmelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () UpperCAmelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = FlaxGPTJModelTester(self ) def UpperCamelCase_ ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__A , __A , __A , __A ) def UpperCamelCase_ ( self : Dict ): for model_class_name in self.all_model_classes: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __A , __A , __A , __A ) @tooslow def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) _UpperCamelCase = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=__A , truncation=__A ) _UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) _UpperCamelCase = False _UpperCamelCase = model.config.eos_token_id _UpperCamelCase = jax.jit(model.generate ) _UpperCamelCase = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences _UpperCamelCase = tokenizer.batch_decode(__A , skip_special_tokens=__A ) _UpperCamelCase = [ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. 
I\'m going to''', ] self.assertListEqual(__A , __A ) @is_pt_flax_cross_test def UpperCamelCase_ ( self : Any ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _UpperCamelCase = self._prepare_for_class(__A , __A ) _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning _UpperCamelCase = getattr(__A , __A ) _UpperCamelCase , _UpperCamelCase = pt_inputs['''input_ids'''].shape _UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = pt_model_class(__A ).eval() _UpperCamelCase = model_class(__A , dtype=jnp.floataa ) _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A ) _UpperCamelCase = fx_state with torch.no_grad(): _UpperCamelCase = pt_model(**__A ).to_tuple() _UpperCamelCase = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__A ) _UpperCamelCase = model_class.from_pretrained(__A , from_pt=__A ) _UpperCamelCase = fx_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def UpperCamelCase_ ( self : Any ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs _UpperCamelCase = self._prepare_for_class(__A , __A ) _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class _UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning _UpperCamelCase = getattr(__A , __A ) _UpperCamelCase = pt_model_class(__A ).eval() _UpperCamelCase = model_class(__A , dtype=jnp.floataa ) _UpperCamelCase = load_flax_weights_in_pytorch_model(__A , fx_model.params ) _UpperCamelCase , _UpperCamelCase = pt_inputs['''input_ids'''].shape _UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = 0 _UpperCamelCase = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): _UpperCamelCase = pt_model(**__A ).to_tuple() _UpperCamelCase = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__A ) _UpperCamelCase = pt_model_class.from_pretrained(__A , from_flax=__A ) with torch.no_grad(): _UpperCamelCase = pt_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , '''Output lengths differ 
between Flax and PyTorch''' ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def UpperCamelCase_ ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} _lowerCAmelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } _lowerCAmelCase = { '''allenai/longformer-base-4096''': 4_096, '''allenai/longformer-large-4096''': 4_096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4_096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4_096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _snake_case ( ): _UpperCamelCase = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) _UpperCamelCase = bs[:] _UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCAmelCase ) cs.append(2**8 + n ) n += 1 _UpperCamelCase = [chr(_UpperCAmelCase ) for n in cs] return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) ) def _snake_case ( __snake_case ): _UpperCamelCase = set() _UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCamelCase = char return pairs class lowerCAmelCase_ ( __UpperCAmelCase ): UpperCAmelCase = VOCAB_FILES_NAMES UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , _A : List[str] , _A : Any , _A : Optional[Any]="replace" , _A : Union[str, Any]="<s>" , _A : List[Any]="</s>" , _A : Tuple="</s>" , _A : Optional[int]="<s>" , _A : str="<unk>" , _A : Optional[int]="<pad>" , _A : int="<mask>" , _A : Any=False , **_A : Dict , ): _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if 
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else bos_token _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else sep_token _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cls_token _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle: _UpperCamelCase = json.load(__SCREAMING_SNAKE_CASE ) _UpperCamelCase = {v: k for k, v in self.encoder.items()} _UpperCamelCase = errors # how to handle errors in decoding _UpperCamelCase = bytes_to_unicode() _UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle: _UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1] _UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] _UpperCamelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) _UpperCamelCase = {} _UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def UpperCamelCase_ ( self : List[Any] ): return len(self.encoder ) def UpperCamelCase_ ( self : Optional[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] ): if token in self.cache: return self.cache[token] _UpperCamelCase = tuple(__SCREAMING_SNAKE_CASE ) _UpperCamelCase = get_pairs(__SCREAMING_SNAKE_CASE ) if not pairs: return token while True: _UpperCamelCase = min(__SCREAMING_SNAKE_CASE , key=lambda _A : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _UpperCamelCase , _UpperCamelCase = bigram _UpperCamelCase = [] _UpperCamelCase = 0 while i < len(__SCREAMING_SNAKE_CASE ): try: _UpperCamelCase = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _UpperCamelCase = j if word[i] == first and i 
< len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            _UpperCamelCase = tuple(__SCREAMING_SNAKE_CASE )
            _UpperCamelCase = new_word
            if len(__SCREAMING_SNAKE_CASE ) == 1:
                break
            else:
                _UpperCamelCase = get_pairs(__SCREAMING_SNAKE_CASE )
        _UpperCamelCase = ''' '''.join(__SCREAMING_SNAKE_CASE )
        _UpperCamelCase = word
        return word

    def UpperCamelCase_ ( self : str , _A : Optional[Any] ):
        _UpperCamelCase = []
        for token in re.findall(self.pat , __SCREAMING_SNAKE_CASE ):
            _UpperCamelCase = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' )
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) )
        return bpe_tokens

    def UpperCamelCase_ ( self : Optional[int] , _A : Optional[Any] ):
        return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )

    def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] ):
        return self.decoder.get(__SCREAMING_SNAKE_CASE )

    def UpperCamelCase_ ( self : List[Any] , _A : List[str] ):
        _UpperCamelCase = ''''''.join(__SCREAMING_SNAKE_CASE )
        _UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def UpperCamelCase_ ( self : Tuple , _A : str , _A : Optional[str] = None ):
        if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        _UpperCamelCase = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _UpperCamelCase = os.path.join(
            __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )

        _UpperCamelCase = 0
        with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    _UpperCamelCase = token_index
                writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
                index += 1

        return vocab_file, merge_file

    def UpperCamelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        _UpperCamelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCamelCase_ ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )

        if token_ids_a is None:
            return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
        return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]

    def UpperCamelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ):
        _UpperCamelCase = [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def UpperCamelCase_ ( self : Any , _A : Dict , _A : str=False , **_A : Dict ):
        _UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
            _UpperCamelCase = ''' ''' + text
        return (text, kwargs)
716
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = tempfile.mkdtemp()

        # fmt: off
        _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        _UpperCamelCase = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        _UpperCamelCase = os.path.join(self.tmpdirname , _A )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(_A , _A )

    def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **_A )

    def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )

    def UpperCamelCase_ ( self : int ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self : Optional[int] ):
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        processor.save_pretrained(self.tmpdirname )
        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )

        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )

        _UpperCamelCase = self.prepare_image_inputs()

        _UpperCamelCase = image_processor(_A , return_tensors='''np''' )
        _UpperCamelCase = processor(images=_A , return_tensors='''np''' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )

        _UpperCamelCase = '''lower newer'''

        _UpperCamelCase = processor(text=_A )
        _UpperCamelCase = tokenizer(_A )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )

        _UpperCamelCase = '''lower newer'''
        _UpperCamelCase = self.prepare_image_inputs()

        _UpperCamelCase = processor(text=_A , images=_A )

        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with self.assertRaises(_A ):
            processor()

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )

        _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        _UpperCamelCase = processor.batch_decode(_A )
        _UpperCamelCase = tokenizer.batch_decode(_A )

        self.assertListEqual(_A , _A )

    def UpperCamelCase_ ( self : List[str] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )

        _UpperCamelCase = '''lower newer'''
        _UpperCamelCase = self.prepare_image_inputs()

        _UpperCamelCase = processor(text=_A , images=_A )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_lowerCAmelCase = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
717
def _snake_case ( __snake_case , __snake_case , __snake_case ):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod
    else:
        _UpperCamelCase = binary_exponentiation(__snake_case , n / 2 , __snake_case )
        return (b * b) % mod


# a prime number
_lowerCAmelCase = 701

_lowerCAmelCase = 1_000_000_000
_lowerCAmelCase = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
71
0
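# Illustrative sketch (not part of the dataset rows above or below): a
# clean-named restatement of the recursive square-and-multiply routine from
# the snippet two rows up, cross-checked against Python's built-in pow().
# All names here are assumptions introduced only for this example.
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    # O(log n) multiplications: strip one factor when n is odd, square when even.
    if n == 0:
        return 1
    if n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    half = binary_exponentiation(a, n // 2, mod)
    return (half * half) % mod


if __name__ == "__main__":
    p, b = 701, 10
    # By Fermat's little theorem, b**(p-2) mod p inverts b modulo the prime p,
    # which is what the snippet's final print statements rely on.
    assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)
    print(binary_exponentiation(b, p - 2, p))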
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

    if is_vision_available():
        from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class lowerCAmelCase_ :
    def __init__( self : Optional[int] , _A : Union[str, Any] , _A : int=2 , _A : List[str]=True , _A : List[Any]=False , _A : Union[str, Any]=10 , _A : Optional[int]=3 , _A : List[Any]=32 * 4 , _A : Dict=32 * 6 , _A : Optional[Any]=4 , _A : Any=32 , ):
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = is_training
        _UpperCamelCase = use_auxiliary_loss
        _UpperCamelCase = num_queries
        _UpperCamelCase = num_channels
        _UpperCamelCase = min_size
        _UpperCamelCase = max_size
        _UpperCamelCase = num_labels
        _UpperCamelCase = mask_feature_size

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __A )

        _UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )

        _UpperCamelCase = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
        ).float()

        _UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()

        _UpperCamelCase = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCamelCase_ ( self : Optional[Any] ):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) ,
            decoder_config=DetrConfig(
                decoder_ffn_dim=128 ,
                num_queries=self.num_queries ,
                decoder_attention_heads=2 ,
                d_model=self.mask_feature_size , ) ,
            mask_feature_size=self.mask_feature_size ,
            fpn_feature_size=self.mask_feature_size ,
            num_channels=self.num_channels ,
            num_labels=self.num_labels , )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def UpperCamelCase_ ( self : Optional[int] , _A : Union[str, Any] , _A : Dict ):
        _UpperCamelCase = output.encoder_hidden_states
        _UpperCamelCase = output.pixel_decoder_hidden_states
        _UpperCamelCase = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__A ) , config.decoder_config.decoder_layers )

    def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Any , _A : Dict=False ):
        with torch.no_grad():
            _UpperCamelCase = MaskFormerModel(config=__A )
            model.to(__A )
            model.eval()

            _UpperCamelCase = model(pixel_values=__A , pixel_mask=__A )
            _UpperCamelCase = model(__A , output_hidden_states=__A )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,
            (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(__A , __A )

    def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ):
        _UpperCamelCase = MaskFormerForInstanceSegmentation(config=__A )
        model.to(__A )
        model.eval()

        def comm_check_on_output(_A : int ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _UpperCamelCase = model(pixel_values=__A , pixel_mask=__A )
            _UpperCamelCase = model(__A )

            comm_check_on_output(__A )

            _UpperCamelCase = model(
                pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )

        comm_check_on_output(__A )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
    UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    UpperCAmelCase = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    UpperCAmelCase = False
    UpperCAmelCase = False
    UpperCAmelCase = False
    UpperCAmelCase = False

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = MaskFormerModelTester(self )
        _UpperCamelCase = ConfigTester(self , config_class=__A , has_text_modality=__A )

    def UpperCamelCase_ ( self : List[Any] ):
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def UpperCamelCase_ ( self : int ):
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def UpperCamelCase_ ( self : List[Any] ):
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def UpperCamelCase_ ( self : int ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def UpperCamelCase_ ( self : List[Any] ):
        pass

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(__A )
            _UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]

            _UpperCamelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __A )

    @slow
    def UpperCamelCase_ ( self : int ):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _UpperCamelCase = MaskFormerModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = (self.model_tester.min_size,) * 2
        _UpperCamelCase = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__A ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=__A ),
            '''class_labels''': torch.zeros(2 , 10 , device=__A ).long(),
        }

        _UpperCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
        _UpperCamelCase = model(**__A )
        self.assertTrue(outputs.loss is not None )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )

    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(__A ).to(__A )
            _UpperCamelCase = model(**__A , output_attentions=__A )
            self.assertTrue(outputs.attentions is not None )

    def UpperCamelCase_ ( self : Dict ):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _UpperCamelCase = self.all_model_classes[1]
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()

        _UpperCamelCase = model_class(__A )
        model.to(__A )
        model.train()

        _UpperCamelCase = model(__A , mask_labels=__A , class_labels=__A ).loss
        loss.backward()

    def UpperCamelCase_ ( self : Tuple ):
        # only MaskFormerForInstanceSegmentation has the loss
        _UpperCamelCase = self.all_model_classes[1]
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        _UpperCamelCase = True
        _UpperCamelCase = True

        _UpperCamelCase = model_class(__A )
        model.to(__A )
        model.train()

        _UpperCamelCase = model(__A , mask_labels=__A , class_labels=__A )

        _UpperCamelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        _UpperCamelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        _UpperCamelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        _UpperCamelCase = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__A )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


_lowerCAmelCase = 1E-4


def _snake_case ( ):
    _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    @cached_property
    def UpperCamelCase_ ( self : Union[str, Any] ):
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__A )
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _UpperCamelCase = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 800, 1088) )

        with torch.no_grad():
            _UpperCamelCase = model(**__A )

        _UpperCamelCase = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )

        _UpperCamelCase = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )

        _UpperCamelCase = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__A )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__A )
            .eval()
        )
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _UpperCamelCase = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 800, 1088) )

        with torch.no_grad():
            _UpperCamelCase = model(**__A )
        # masks_queries_logits
        _UpperCamelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _UpperCamelCase = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        _UpperCamelCase = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
        # class_queries_logits
        _UpperCamelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _UpperCamelCase = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__A )
            .eval()
        )
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(__A , return_tensors='''pt''' ).to(__A )
        _UpperCamelCase = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__A , (1, 3, 800, 1088) )

        with torch.no_grad():
            _UpperCamelCase = model(**__A )
        # masks_queries_logits
        _UpperCamelCase = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _UpperCamelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        _UpperCamelCase = torch.tensor(__A ).to(__A )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
        # class_queries_logits
        _UpperCamelCase = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _UpperCamelCase = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__A )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__A )
            .eval()
        )
        _UpperCamelCase = self.default_image_processor

        _UpperCamelCase = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,
            segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,
            return_tensors='''pt''' , )

        _UpperCamelCase = inputs['''pixel_values'''].to(__A )
        _UpperCamelCase = [el.to(__A ) for el in inputs['''mask_labels''']]
        _UpperCamelCase = [el.to(__A ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            _UpperCamelCase = model(**__A )

        self.assertTrue(outputs.loss is not None )
718
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)

    _UpperCamelCase = (1 - _cos) / 2
    _UpperCamelCase = 1 - _cos

    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)

    _UpperCamelCase = (1 + _cos) / 2
    _UpperCamelCase = -1 - _cos

    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)

    _UpperCamelCase = _sin / 2
    _UpperCamelCase = 0
    _UpperCamelCase = -ba

    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)

    _UpperCamelCase = 1 - alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 + alpha

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)

    _UpperCamelCase = 1 + alpha * big_a
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha * big_a
    _UpperCamelCase = 1 + alpha / big_a
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha / big_a

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)
    _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
    _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
    _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
    _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
    _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha

    _UpperCamelCase = big_a * (pmc + aaa)
    _UpperCamelCase = 2 * big_a * mpc
    _UpperCamelCase = big_a * (pmc - aaa)
    _UpperCamelCase = ppmc + aaa
    _UpperCamelCase = -2 * pmpc
    _UpperCamelCase = ppmc - aaa

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)
    _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
    _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
    _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
    _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
    _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha

    _UpperCamelCase = big_a * (ppmc + aaa)
    _UpperCamelCase = -2 * big_a * pmpc
    _UpperCamelCase = big_a * (ppmc - aaa)
    _UpperCamelCase = pmc + aaa
    _UpperCamelCase = 2 * mpc
    _UpperCamelCase = pmc - aaa

    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt
71
0
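# Illustrative sketch (not part of the dataset rows above or below): the
# snippet two rows up builds RBJ-cookbook biquads but hands them to an
# external `IIRFilter`, so this self-contained check verifies the low-pass
# coefficients alone. The function name and structure are assumptions
# introduced only for this example; the formulas match the snippet's first
# filter. A low-pass biquad should pass DC at unity gain, i.e.
# (b0 + b1 + b2) / (a0 + a1 + a2) == 1.
from math import cos, sin, sqrt, tau


def lowpass_biquad_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    # Same math as the snippet's low-pass, with distinct coefficient names.
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = b2 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    return [b0, b1, b2], [a0, a1, a2]


if __name__ == "__main__":
    b, a = lowpass_biquad_coefficients(1_000, 48_000)
    print(sum(b) / sum(a))  # ~1.0: DC passes through a low-pass at unity gain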
import unittest

from knapsack import knapsack as k


class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = 0
        _UpperCamelCase = [0]
        _UpperCamelCase = [0]
        _UpperCamelCase = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )

        _UpperCamelCase = [60]
        _UpperCamelCase = [10]
        _UpperCamelCase = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 0 )

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = 3
        _UpperCamelCase = [1, 2, 3]
        _UpperCamelCase = [3, 2, 1]
        _UpperCamelCase = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 5 )

    def UpperCamelCase_ ( self : List[str] ):
        _UpperCamelCase = 50
        _UpperCamelCase = [60, 100, 120]
        _UpperCamelCase = [10, 20, 30]
        _UpperCamelCase = len(_lowercase )
        self.assertEqual(k.knapsack(_lowercase , _lowercase , _lowercase , _lowercase ) , 220 )


if __name__ == "__main__":
    unittest.main()
719
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
71
0
def _snake_case ( __snake_case ):
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
from ..utils import DummyObject, requires_backends


class lowerCAmelCase_ ( metaclass=__lowercase ):
    UpperCAmelCase = ["keras_nlp"]

    def __init__( self : Any , *_A : Dict , **_A : List[str] ):
        requires_backends(self , ['''keras_nlp'''] )
71
0
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


__snake_case = 16
__snake_case = 32


def _snake_case ( __snake_case , __snake_case = 16 , __snake_case = "bert-base-cased" ):
    _UpperCamelCase = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
    _UpperCamelCase = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(__snake_case ):
        # max_length=None => use the model max length (it's actually the default)
        _UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    _UpperCamelCase = datasets.map(
        lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    _UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(__snake_case ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    _UpperCamelCase = DataLoader(
        tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
    _UpperCamelCase = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )

    return train_dataloader, eval_dataloader


def _snake_case ( __snake_case , __snake_case ):
    # Initialize accelerator
    _UpperCamelCase = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _UpperCamelCase = config['''lr''']
    _UpperCamelCase = int(config['''num_epochs'''] )
    _UpperCamelCase = int(config['''seed'''] )
    _UpperCamelCase = int(config['''batch_size'''] )
    _UpperCamelCase = args.model_name_or_path

    set_seed(lowerCAmelCase__ )
    _UpperCamelCase , _UpperCamelCase = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    _UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )

    # Instantiate optimizer
    _UpperCamelCase = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    _UpperCamelCase = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ )

    if accelerator.state.deepspeed_plugin is not None:
        _UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        _UpperCamelCase = 1
    _UpperCamelCase = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        _UpperCamelCase = get_linear_schedule_with_warmup(
            optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , )
    else:
        _UpperCamelCase = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    # We need to keep track of how many total steps we have iterated over
    _UpperCamelCase = 0
    # We also need to keep track of the stating epoch so files are named properly
    _UpperCamelCase = 0

    # Now we train the model
    _UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' )
    _UpperCamelCase = 0
    _UpperCamelCase = {}
    for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ):
        model.train()
        for step, batch in enumerate(lowerCAmelCase__ ):
            _UpperCamelCase = model(**lowerCAmelCase__ )
            _UpperCamelCase = outputs.loss
            _UpperCamelCase = loss / gradient_accumulation_steps
            accelerator.backward(lowerCAmelCase__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        _UpperCamelCase = 0
        for step, batch in enumerate(lowerCAmelCase__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                _UpperCamelCase = model(**lowerCAmelCase__ )
            _UpperCamelCase = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            _UpperCamelCase , _UpperCamelCase = accelerator.gather(
                (predictions, batch['''labels''']) )
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(lowerCAmelCase__ ) - 1:
                    _UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    _UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )

        _UpperCamelCase = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase__ )
        _UpperCamelCase = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            _UpperCamelCase = eval_metric['''accuracy''']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(lowerCAmelCase__ , lowerCAmelCase__ )


def _snake_case ( ):
    _UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' ,
        type=lowerCAmelCase__ ,
        default='''bert-base-cased''' ,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,
        required=lowerCAmelCase__ , )
    parser.add_argument(
        '''--output_dir''' ,
        type=lowerCAmelCase__ ,
        default='''.''' ,
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--performance_lower_bound''' ,
        type=lowerCAmelCase__ ,
        default=lowerCAmelCase__ ,
        help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
    parser.add_argument(
        '''--num_epochs''' ,
        type=lowerCAmelCase__ ,
        default=3 ,
        help='''Number of train epochs.''' , )
    _UpperCamelCase = parser.parse_args()
    _UpperCamelCase = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(lowerCAmelCase__ , lowerCAmelCase__ )


if __name__ == "__main__":
    main()
721
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


_lowerCAmelCase = logging.get_logger(__name__)

# General docstring
_lowerCAmelCase = "RegNetConfig"

# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]

# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"

_lowerCAmelCase = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
        super().__init__(**_A )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        _UpperCamelCase = tf.keras.layers.ConvaD(
            filters=_A ,
            kernel_size=_A ,
            strides=_A ,
            padding='''VALID''' ,
            groups=_A ,
            use_bias=_A ,
            name='''convolution''' , )
        _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity

    def UpperCamelCase_ ( self : Any , _A : Any ):
        _UpperCamelCase = self.convolution(self.padding(_A ) )
        _UpperCamelCase = self.normalization(_A )
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
        super().__init__(**_A )
        _UpperCamelCase = config.num_channels
        _UpperCamelCase = TFRegNetConvLayer(
            out_channels=config.embedding_size ,
            kernel_size=3 ,
            stride=2 ,
            activation=config.hidden_act ,
            name='''embedder''' , )

    def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
        _UpperCamelCase = shape_list(_A )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
        _UpperCamelCase = self.embedder(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
        super().__init__(**_A )
        _UpperCamelCase = tf.keras.layers.ConvaD(
            filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
        _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )

    def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
        return self.normalization(self.convolution(_A ) , training=_A )


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
        super().__init__(**_A )
        _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
        _UpperCamelCase = [
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]

    def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        _UpperCamelCase = self.pooler(_A )
        for layer_module in self.attention:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = hidden_state * pooled
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
        super().__init__(**_A )
        _UpperCamelCase = in_channels != out_channels or stride != 1
        _UpperCamelCase = max(1 , out_channels // config.groups_width )
        _UpperCamelCase = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        _UpperCamelCase = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
        ]
        _UpperCamelCase = ACTaFN[config.hidden_act]

    def UpperCamelCase_ ( self : Dict , _A : Tuple ):
        _UpperCamelCase = hidden_state
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = self.shortcut(_A )
        hidden_state += residual
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
        super().__init__(**_A )
        _UpperCamelCase = in_channels != out_channels or stride != 1
        _UpperCamelCase = max(1 , out_channels // config.groups_width )
        _UpperCamelCase = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        _UpperCamelCase = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
        ]
        _UpperCamelCase = ACTaFN[config.hidden_act]

    def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
        _UpperCamelCase = hidden_state
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = self.shortcut(_A )
        hidden_state += residual
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
        super().__init__(**_A )
        _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        _UpperCamelCase = [
            # downsampling is done in the first layer with stride of 2
            layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
            *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]

    def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
        super().__init__(**_A )
        _UpperCamelCase = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _A ,
                config.embedding_size ,
                config.hidden_sizes[0] ,
                stride=2 if config.downsample_in_first_stage else 1 ,
                depth=config.depths[0] ,
                name='''stages.0''' , ) )
        _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )

    def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
        _UpperCamelCase = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                _UpperCamelCase = hidden_states + (hidden_state,)

            _UpperCamelCase = stage_module(_A )

        if output_hidden_states:
            _UpperCamelCase = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )

        return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )


@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    UpperCAmelCase = RegNetConfig

    def __init__( self : int , _A : Tuple , **_A : int ):
        super().__init__(**_A )
        _UpperCamelCase = config
        _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
        _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
        _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )

    @unpack_inputs
    def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict

        _UpperCamelCase = self.embedder(_A , training=_A )

        _UpperCamelCase = self.encoder(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )

        _UpperCamelCase = encoder_outputs[0]

        _UpperCamelCase = self.pooler(_A )

        # Change to NCHW output format have uniformity in the modules
        _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
        _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_A ,
            pooler_output=_A ,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )


class lowerCAmelCase_ ( __lowercase ):
    UpperCAmelCase = RegNetConfig
    UpperCAmelCase = "regnet"
    UpperCAmelCase = "pixel_values"

    @property
    def UpperCamelCase_ ( self : Tuple ):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}


_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"

_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." ,
    __lowercase , )
class lowerCAmelCase_ ( __lowercase ):
    def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
        super().__init__(_A , *_A , **_A )
        _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,
        output_type=_A ,
        config_class=_CONFIG_FOR_DOC ,
        modality='''vision''' ,
        expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict

        _UpperCamelCase = self.regnet(
            pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,
            pooler_output=outputs.pooler_output ,
            hidden_states=outputs.hidden_states , )


@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " ,
    __lowercase , )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
    def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
        super().__init__(_A , *_A , **_A )
        _UpperCamelCase = config.num_labels
        _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
        # classification head
        _UpperCamelCase = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,
        output_type=_A ,
        config_class=_CONFIG_FOR_DOC ,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict

        _UpperCamelCase = self.regnet(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )

        _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]

        _UpperCamelCase = self.classifier[0](_A )
        _UpperCamelCase = self.classifier[1](_A )

        _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )

        if not return_dict:
            _UpperCamelCase = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
71
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_lowerCAmelCase = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
from sklearn.metrics import mean_squared_error

import datasets


_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"

_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"

_lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def UpperCamelCase_ ( self : Optional[int] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(self._get_feature_types() ) ,
            reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ] , )

    def UpperCamelCase_ ( self : Dict ):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''' ) ),
                "references": datasets.Sequence(datasets.Value('''float''' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('''float''' ),
                "references": datasets.Value('''float''' ),
            }

    def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ):
        _UpperCamelCase = mean_squared_error(
            _A , _A , sample_weight=_A , multioutput=_A , squared=_A )

        return {"mse": mse}
71
0
import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _lowerCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( OwlViTImageProcessor ): def __init__( self : Dict , *_A : List[str] , **_B : str ): warnings.warn( '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use OwlViTImageProcessor instead.''' , FutureWarning , ) super().__init__(*_A , **_B )
701
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } _lowerCAmelCase = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } _lowerCAmelCase = { "facebook/blenderbot_small-90M": 512, } class lowerCAmelCase_ ( _UpperCAmelCase ): UpperCAmelCase = VOCAB_FILES_NAMES UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase = BlenderbotSmallTokenizer def __init__( self : Optional[int] , _A : List[Any]=None , _A : str=None , _A : Optional[int]="<|endoftext|>" , _A : int="<|endoftext|>" , _A : Any="<|endoftext|>" , _A : List[Any]=False , _A : Dict=True , **_A : int , ): super().__init__( ByteLevelBPETokenizer( vocab=lowercase__ , merges=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , ) , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , **lowercase__ , ) _UpperCamelCase = add_prefix_space def UpperCamelCase_ ( self : List[Any] , _A : Tuple , _A : int=None ): _UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : List[str] = None ): _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
702
from __future__ import annotations import math class lowerCAmelCase_ : def __init__( self : int , _A : int ): _UpperCamelCase = size # approximate the overall size of segment tree with given value _UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update _UpperCamelCase = [0 for i in range(0 , 4 * size )] _UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCamelCase_ ( self : str , _A : int ): return idx * 2 def UpperCamelCase_ ( self : Any , _A : int ): return idx * 2 + 1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ): if left_element == right_element: _UpperCamelCase = a[left_element - 1] else: _UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_A ) , _A , _A , _A ) self.build(self.right(_A ) , mid + 1 , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: _UpperCamelCase = val if left_element != right_element: _UpperCamelCase = val _UpperCamelCase = val _UpperCamelCase = True _UpperCamelCase = True return True _UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_A ) , _A , _A , _A , _A , _A ) self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) return True def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] _UpperCamelCase = (left_element + right_element) // 2 _UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A ) _UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A ) return max(_A , _A ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCAmelCase = 15 _lowerCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
71
0
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = ConsistencyModelPipeline UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt UpperCAmelCase = frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def UpperCamelCase_ ( self : int ): _UpperCamelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, Any]=False ): if class_cond: _UpperCamelCase = self.dummy_cond_unet else: _UpperCamelCase = self.dummy_uncond_unet # Default to CM multistep sampler _UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCamelCase_ ( self : int , _A : Optional[int] , _A : Any=0 ): if str(_A ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(_A ) else: _UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A ) _UpperCamelCase = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = ConsistencyModelPipeline(**_A ) _UpperCamelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components(class_cond=_A ) _UpperCamelCase = ConsistencyModelPipeline(**_A ) _UpperCamelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 0 _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase_ ( self : Any ): _UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = 
self.get_dummy_components() _UpperCamelCase = ConsistencyModelPipeline(**_A ) _UpperCamelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 1 _UpperCamelCase = None _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components(class_cond=_A ) _UpperCamelCase = ConsistencyModelPipeline(**_A ) _UpperCamelCase = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_dummy_inputs(_A ) _UpperCamelCase = 1 _UpperCamelCase = None _UpperCamelCase = 0 _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 32, 32, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self : Optional[Any] , _A : Tuple=0 , _A : Optional[Any]=False , _A : Optional[Any]="cpu" , _A : Union[str, Any]=torch.floataa , _A : Dict=(1, 3, 64, 64) ): _UpperCamelCase = torch.manual_seed(_A ) _UpperCamelCase = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: _UpperCamelCase = self.get_fixed_latents(seed=_A , device=_A , dtype=_A , shape=_A ) _UpperCamelCase = latents return inputs def UpperCamelCase_ ( self : str , _A : Tuple=0 , _A : Tuple="cpu" , _A : Tuple=torch.floataa , _A : str=(1, 3, 64, 64) ): if type(_A ) == str: _UpperCamelCase = torch.device(_A ) _UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A ) _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) return latents def UpperCamelCase_ ( self : str ): _UpperCamelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) _UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _UpperCamelCase = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs() _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def UpperCamelCase_ ( self : str ): _UpperCamelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) _UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _UpperCamelCase = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs() _UpperCamelCase = 1 _UpperCamelCase = None _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) _UpperCamelCase 
= image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) _UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _UpperCamelCase = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs(get_fixed_latents=_A , device=_A ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ): _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) _UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) _UpperCamelCase = ConsistencyModelPipeline(unet=_A , scheduler=_A ) pipe.to(torch_device=_A , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = self.get_inputs(get_fixed_latents=_A , device=_A ) _UpperCamelCase = 1 _UpperCamelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ): _UpperCamelCase = pipe(**_A ).images assert image.shape == (1, 64, 64, 3) _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
703
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_jukebox"] = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( __A, unittest.TestCase ): UpperCAmelCase = XGLMTokenizer UpperCAmelCase = XGLMTokenizerFast UpperCAmelCase = True UpperCAmelCase = True def UpperCamelCase_ ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = XGLMTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '<pad>' _UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(_A ) , 1008 ) def UpperCamelCase_ ( self : List[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = XGLMTokenizer(_A , keep_accents=_A ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def UpperCamelCase_ ( self : List[Any] ): return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def UpperCamelCase_ ( self : Tuple ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) _UpperCamelCase = XGLMTokenizer(f.name , keep_accents=_A ) _UpperCamelCase = pickle.dumps(_A ) pickle.loads(_A ) def UpperCamelCase_ ( self : Optional[Any] ): if not self.test_rust_tokenizer: return _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = 'I was born in 92000, and this is falsé.' 
_UpperCamelCase = tokenizer.tokenize(_A ) _UpperCamelCase = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) _UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A ) _UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = tokenizer.encode(_A ) _UpperCamelCase = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = 'Hello World!' _UpperCamelCase = [2, 3_1227, 4447, 35] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off _UpperCamelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = { 'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''facebook/xglm-564M''' , padding=_A , )
704
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
71
0
def _snake_case ( input_str ): bitmap = 0 for ch in input_str: ch_unicode = ord(ch ) ch_bit_index_on = pow(2 , ch_unicode ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
71
0
import json import sys def format_json_to_md ( input_json_file , output_md_file ): with open(input_json_file , encoding='''utf-8''' ) as f: results = json.load(f ) output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "] for benchmark_name in sorted(results ): benchmark_res = results[benchmark_name] benchmark_file_name = benchmark_name.split('''/''' )[-1] output_md.append(f"""### Benchmark: {benchmark_file_name}""" ) title = "| metric |" lines = "|--------|" value = "| new / old (diff) |" for metric_name in sorted(benchmark_res ): metric_vals = benchmark_res[metric_name] new_val = metric_vals["new"] old_val = metric_vals.get('''old''' , None ) dif_val = metric_vals.get('''diff''' , None ) val_str = f""" {new_val:f}""" if isinstance(new_val , (int, float) ) else "None" if old_val is not None: val_str += f""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None" if dif_val is not None: val_str += f""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(output_md_file , '''w''' , encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(output_md ) ) if __name__ == "__main__": _lowerCAmelCase = sys.argv[1] _lowerCAmelCase = sys.argv[2] format_json_to_md(sys.argv[1], sys.argv[2])
706
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss _UpperCamelCase = -(labels.shape[-1] * loss.item()) _UpperCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
71
0
def prefix_function ( input_string ): prefix_result = [0] * len(input_string ) for i in range(1 , len(input_string ) ): # use last results for better performance - dynamic programming j = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: j = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 prefix_result[i] = j return prefix_result def longest_prefix ( input_str ): return max(prefix_function(input_str ) ) if __name__ == "__main__": import doctest doctest.testmod()
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether tp freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
71
0
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ): if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: tokenizer_names = TOKENIZER_CLASSES else: tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + '''Fast''' )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: tokenizer_class = TOKENIZER_CLASSES[tokenizer_name] add_prefix = True if checkpoint_name is None: checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() ) else: checkpoint_names = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: checkpoint_directory , checkpoint_prefix_name = checkpoint.split('''/''' ) dump_path_full = os.path.join(dump_path , checkpoint_directory ) elif add_prefix: checkpoint_prefix_name = checkpoint dump_path_full = dump_path else: checkpoint_prefix_name = None dump_path_full = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] next_char = file_path.split(checkpoint )[-1][0] if next_char == "/": dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name ) checkpoint_prefix_name = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) file_names = tokenizer.save_pretrained( dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('''tokenizer.json''' ): os.remove(file_name ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ' "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) args = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
708
from __future__ import annotations

import typing
from collections import Counter


def _snake_case ( __snake_case ):
    _UpperCamelCase = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(__snake_case , max_perimeter + 1 ):
            _UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(__snake_case ):
                _UpperCamelCase = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def _snake_case ( __snake_case = 1000 ):
    _UpperCamelCase = pythagorean_triple(__snake_case )
    return triplets.most_common(1 )[0][0]


if __name__ == "__main__":
    print(f'Perimeter {solution()} has maximum solutions')
71
0
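As a quick sanity check on the perimeter-counting sample above (rewritten here with descriptive names, since the stored row uses obfuscated identifiers), the counts can be verified against two known results:

from collections import Counter


def count_triplets(max_perimeter: int) -> Counter:
    # For each perimeter <= max_perimeter, count integer right triangles with that perimeter
    triplets: Counter = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter <= max_perimeter:
                    triplets[perimeter] += 1
    return triplets


# p = 120 is the classic case with three solutions: (20,48,52), (24,45,51), (30,40,50)
assert count_triplets(120)[120] == 3
# For max_perimeter = 1000 the most frequent perimeter is 840 (Project Euler 39)
assert count_triplets(1000).most_common(1)[0][0] == 840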
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
709
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
    UpperCAmelCase = (DPMSolverSDEScheduler,)
    UpperCAmelCase = 10

    def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
        _UpperCamelCase = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**_A )
        return config

    def UpperCamelCase_ ( self : List[Any] ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=_A )

    def UpperCamelCase_ ( self : List[Any] ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )

    def UpperCamelCase_ ( self : List[str] ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=_A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**_A )

        scheduler.set_timesteps(self.num_inference_steps )

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _UpperCamelCase = sample.to(_A )

        for i, t in enumerate(scheduler.timesteps ):
            _UpperCamelCase = scheduler.scale_model_input(_A , _A )

            _UpperCamelCase = model(_A , _A )

            _UpperCamelCase = scheduler.step(_A , _A , _A )
            _UpperCamelCase = output.prev_sample

        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
        _UpperCamelCase = scheduler_class(**_A )

        scheduler.set_timesteps(self.num_inference_steps )

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _UpperCamelCase = sample.to(_A )

        for i, t in enumerate(scheduler.timesteps ):
            _UpperCamelCase = scheduler.scale_model_input(_A , _A )

            _UpperCamelCase = model(_A , _A )

            _UpperCamelCase = scheduler.step(_A , _A , _A )
            _UpperCamelCase = output.prev_sample

        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**_A )

        scheduler.set_timesteps(self.num_inference_steps , device=_A )

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            _UpperCamelCase = scheduler.scale_model_input(_A , _A )

            _UpperCamelCase = model(_A , _A )

            _UpperCamelCase = scheduler.step(_A , _A , _A )
            _UpperCamelCase = output.prev_sample

        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config()
        _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )

        scheduler.set_timesteps(self.num_inference_steps , device=_A )

        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
        _UpperCamelCase = sample.to(_A )

        for t in scheduler.timesteps:
            _UpperCamelCase = scheduler.scale_model_input(_A , _A )

            _UpperCamelCase = model(_A , _A )

            _UpperCamelCase = scheduler.step(_A , _A , _A )
            _UpperCamelCase = output.prev_sample

        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
71
0
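Stripped of the test harness, the scheduler test above reduces to a small denoising loop. A sketch of that core usage, relying only on the `diffusers` calls the test itself makes (the zero tensor stands in for a real UNet's noise prediction; `torchsde` must be installed, per the `@require_torchsde` marker):

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    noise_sampler_seed=0,
)
scheduler.set_timesteps(10)

# Start from noise scaled by the scheduler's initial sigma, then denoise step by step
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real model's output
    sample = scheduler.step(noise_pred, t, sample).prev_sample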
import unittest

from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bsa_available():
    from transformers import MarkupLMFeatureExtractor


class lowerCAmelCase_ ( unittest.TestCase ):
    def __init__( self : Optional[Any] , _A : Tuple ):
        _UpperCamelCase = parent

    def UpperCamelCase_ ( self : List[Any] ):
        return {}


def _snake_case ( ):
    _UpperCamelCase = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'

    _UpperCamelCase = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '

    return [html_string_a, html_string_a]


@require_bsa
class lowerCAmelCase_ ( snake_case__, unittest.TestCase ):
    UpperCAmelCase = MarkupLMFeatureExtractor if is_bsa_available() else None

    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = MarkupLMFeatureExtractionTester(self )

    @property
    def UpperCamelCase_ ( self : str ):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def UpperCamelCase_ ( self : str ):
        # Initialize feature_extractor
        _UpperCamelCase = self.feature_extraction_class()

        # Test not batched input
        _UpperCamelCase = get_html_strings()[0]
        _UpperCamelCase = feature_extractor(_A )

        # fmt: off
        _UpperCamelCase = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        _UpperCamelCase = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes , _A )
        self.assertEqual(encoding.xpaths , _A )

        # Test batched
        _UpperCamelCase = get_html_strings()
        _UpperCamelCase = feature_extractor(_A )

        # fmt: off
        _UpperCamelCase = expected_nodes + [['My First Heading', 'My first paragraph.']]
        _UpperCamelCase = expected_xpaths + [['/html/body/h1', '/html/body/p']]

        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )

        self.assertEqual(encoding.nodes , _A )
        self.assertEqual(encoding.xpaths , _A )
710
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class lowerCAmelCase_ :
    @property
    def UpperCamelCase_ ( self : Optional[int] ):
        return self.get_dummy_input()

    @property
    def UpperCamelCase_ ( self : Dict ):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )

    def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
        _UpperCamelCase = 4
        _UpperCamelCase = 32
        _UpperCamelCase = (32, 32)

        _UpperCamelCase = torch.manual_seed(0 )
        _UpperCamelCase = torch.device(_A )
        _UpperCamelCase = (batch_size, num_channels) + sizes
        _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
        _UpperCamelCase = {'''hidden_states''': hidden_states}

        if include_temb:
            _UpperCamelCase = 128
            _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )

        if include_res_hidden_states_tuple:
            _UpperCamelCase = torch.manual_seed(1 )
            _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)

        if include_encoder_hidden_states:
            _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )

        if include_skip_sample:
            _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )

        return dummy_input

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = {
            '''in_channels''': 32,
            '''out_channels''': 32,
            '''temb_channels''': 128,
        }
        if self.block_type == "up":
            _UpperCamelCase = 32

        if self.block_type == "mid":
            init_dict.pop('''out_channels''' )

        _UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict

    def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
        _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
        _UpperCamelCase = self.block_class(**_A )
        unet_block.to(_A )
        unet_block.eval()

        with torch.no_grad():
            _UpperCamelCase = unet_block(**_A )

        if isinstance(_A , _A ):
            _UpperCamelCase = output[0]

        self.assertEqual(output.shape , self.output_shape )

        _UpperCamelCase = output[0, -1, -3:, -3:]
        _UpperCamelCase = torch.tensor(_A ).to(_A )
        assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )

    @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
        _UpperCamelCase = self.block_class(**_A )
        model.to(_A )
        model.train()
        _UpperCamelCase = model(**_A )

        if isinstance(_A , _A ):
            _UpperCamelCase = output[0]

        _UpperCamelCase = torch.device(_A )
        _UpperCamelCase = randn_tensor(output.shape , device=_A )
        _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
        loss.backward()
71
0
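The expected outputs hard-coded in the MarkupLM test earlier in the row above translate directly into a usage example (this needs the BeautifulSoup extra installed, as the `@require_bsa` decorator suggests; the expected prints follow the test's own fixtures):

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()

html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
encoding = feature_extractor(html)

print(encoding.nodes)   # expected: [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # expected: [['/html/body/h1', '/html/body/p']]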
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) _lowerCAmelCase = None _lowerCAmelCase = { "7B": 11_008, "13B": 13_824, "30B": 17_920, "65B": 22_016, "70B": 28_672, } _lowerCAmelCase = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def _snake_case ( __snake_case , __snake_case=1 , __snake_case=256 ): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _snake_case ( __snake_case ): with open(__snake_case , '''r''' ) as f: return json.load(__snake_case ) def _snake_case ( __snake_case , __snake_case ): with open(__snake_case , '''w''' ) as f: json.dump(__snake_case , __snake_case ) def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=True ): os.makedirs(__snake_case , exist_ok=__snake_case ) _UpperCamelCase = os.path.join(__snake_case , '''tmp''' ) os.makedirs(__snake_case , exist_ok=__snake_case ) _UpperCamelCase = read_json(os.path.join(__snake_case , '''params.json''' ) ) _UpperCamelCase = NUM_SHARDS[model_size] _UpperCamelCase = params['''n_layers'''] _UpperCamelCase = params['''n_heads'''] _UpperCamelCase = n_heads // num_shards _UpperCamelCase = params['''dim'''] _UpperCamelCase = dim // n_heads _UpperCamelCase = 10000.0 _UpperCamelCase = 1.0 / (base ** (torch.arange(0 , __snake_case , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _UpperCamelCase = params['''n_kv_heads'''] # for GQA / MQA _UpperCamelCase = n_heads_per_shard // num_key_value_heads _UpperCamelCase = dim // num_key_value_heads else: # compatibility with other checkpoints _UpperCamelCase = n_heads _UpperCamelCase = n_heads_per_shard _UpperCamelCase = dim # permute for sliced rotary def permute(__snake_case , __snake_case=n_heads , __snake_case=dim , __snake_case=dim ): return w.view(__snake_case , dima // n_heads // 2 , 2 , __snake_case ).transpose(1 , 2 ).reshape(__snake_case , __snake_case ) print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_UpperCamelCase = torch.load(os.path.join(__snake_case , '''consolidated.00.pth''' ) , map_location='''cpu''' ) else: # Sharded _UpperCamelCase = [ torch.load(os.path.join(__snake_case , f"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' ) for i in range(__snake_case ) ] _UpperCamelCase = 0 _UpperCamelCase = {'''weight_map''': {}} for layer_i in range(__snake_case ): _UpperCamelCase = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded _UpperCamelCase = { f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wq.weight"""] ), f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wk.weight"""] ), f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""], f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""], f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""], f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""], f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""], f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""], f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
_UpperCamelCase = { f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.attention_norm.weight""" ].clone(), f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } _UpperCamelCase = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(__snake_case , __snake_case , __snake_case ) for i in range(__snake_case ) ] , dim=0 , ).reshape(__snake_case , __snake_case ) ) _UpperCamelCase = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view( __snake_case , __snake_case , __snake_case ) for i in range(__snake_case ) ] , dim=0 , ).reshape(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case , ) _UpperCamelCase = torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view( __snake_case , __snake_case , __snake_case ) for i in range(__snake_case ) ] , dim=0 , ).reshape(__snake_case , __snake_case ) _UpperCamelCase = torch.cat( [loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(__snake_case )] , dim=1 ) _UpperCamelCase = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(__snake_case )] , dim=0 ) _UpperCamelCase = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(__snake_case )] , dim=1 ) _UpperCamelCase = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(__snake_case )] , dim=0 ) _UpperCamelCase = inv_freq for k, v in state_dict.items(): _UpperCamelCase = filename param_count += v.numel() torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) ) _UpperCamelCase = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded _UpperCamelCase = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: _UpperCamelCase = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]['''tok_embeddings.weight'''] for i in range(__snake_case )] , dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__snake_case )] , dim=0 ), } for k, v in state_dict.items(): _UpperCamelCase = filename param_count += v.numel() torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) ) # Write configs _UpperCamelCase = {'''total_size''': param_count * 2} write_json(__snake_case , os.path.join(__snake_case , '''pytorch_model.bin.index.json''' ) ) _UpperCamelCase = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 _UpperCamelCase = params['''multiple_of'''] if '''multiple_of''' in params else 256 _UpperCamelCase = LlamaConfig( hidden_size=__snake_case , intermediate_size=compute_intermediate_size(__snake_case , __snake_case , __snake_case ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=__snake_case , ) config.save_pretrained(__snake_case ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('''Loading the checkpoint in a Llama model.''' ) _UpperCamelCase = LlamaForCausalLM.from_pretrained(__snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=__snake_case ) # Avoid saving this as part of the config. 
del model.config._name_or_path print('''Saving in the Transformers format.''' ) model.save_pretrained(__snake_case , safe_serialization=__snake_case ) shutil.rmtree(__snake_case ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) _UpperCamelCase = tokenizer_class(__snake_case ) tokenizer.save_pretrained(__snake_case ) def _snake_case ( ): _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , ) parser.add_argument( '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , ) parser.add_argument( '''--output_dir''' , help='''Location to write HF model and tokenizer''' , ) parser.add_argument('''--safe_serialization''' , type=__snake_case , help='''Whether or not to save using `safetensors`.''' ) _UpperCamelCase = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _UpperCamelCase = os.path.join(args.input_dir , '''tokenizer.model''' ) write_tokenizer(args.output_dir , __snake_case ) if __name__ == "__main__": main()
711
def _snake_case ( __snake_case ):
    if not isinstance(__snake_case , __snake_case ):
        raise TypeError('''Input value must be an \'int\' type''' )

    _UpperCamelCase = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
0
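The short bit-twiddling sample above computes the 1-indexed position of the most significant set bit; with descriptive names restored, a few spot checks make the behaviour concrete:

def highest_set_bit_position(number: int) -> int:
    # Position (1-indexed) of the most significant set bit; 0 for input 0
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


assert highest_set_bit_position(0) == 0
assert highest_set_bit_position(1) == 1
assert highest_set_bit_position(32) == 6   # 0b100000
assert highest_set_bit_position(255) == 8  # 0b11111111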
import math


def _snake_case ( __snake_case ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _snake_case ( __snake_case = 10001 ):
    try:
        _UpperCamelCase = int(lowerCamelCase_ )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    _UpperCamelCase = []
    _UpperCamelCase = 2
    while len(lowerCamelCase_ ) < nth:
        if is_prime(lowerCamelCase_ ):
            primes.append(lowerCamelCase_ )
            num += 1
        else:
            num += 1
    return primes[len(lowerCamelCase_ ) - 1]


if __name__ == "__main__":
    print(f'{solution() = }')
712
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
71
0
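A cleaned-up, self-contained version of the nth-prime sample that opens the row above (descriptive names assumed, since the stored identifiers are mangled), spot-checked against known values:

import math


def is_prime(number: int) -> bool:
    # Exact trial division using the 6k +/- 1 pattern, mirroring the sample above
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def nth_prime(nth: int) -> int:
    primes, num = [], 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


assert nth_prime(6) == 13
assert nth_prime(10001) == 104743  # Project Euler problem 7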
import torch


def _snake_case ( ):
    if torch.cuda.is_available():
        _UpperCamelCase = torch.cuda.device_count()
    else:
        _UpperCamelCase = 0
    print(f"""Successfully ran on {num_gpus} GPUs""" )


if __name__ == "__main__":
    main()
713
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
71
0
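With its obfuscated names restored, the device-count sample that opens the row above boils down to:

import torch


def report_gpus() -> None:
    # Count visible CUDA devices; falls back to 0 on CPU-only machines
    num_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    report_gpus()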
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


_lowerCAmelCase = False


class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : str , _A : List[str]=32 ):
        set_seed(0 )
        _UpperCamelCase = UNetaDModel(sample_size=_A , in_channels=3 , out_channels=3 )
        _UpperCamelCase = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer

    @slow
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable

        _UpperCamelCase = DDPMScheduler(
            num_train_timesteps=1000 ,
            beta_start=0.0001 ,
            beta_end=0.02 ,
            beta_schedule='''linear''' ,
            clip_sample=_A ,
        )
        _UpperCamelCase = DDIMScheduler(
            num_train_timesteps=1000 ,
            beta_start=0.0001 ,
            beta_end=0.02 ,
            beta_schedule='''linear''' ,
            clip_sample=_A ,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0 )
        _UpperCamelCase = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_A ) for _ in range(4 )]
        _UpperCamelCase = [torch.randn((4, 3, 32, 32) ).to(_A ) for _ in range(4 )]
        _UpperCamelCase = [torch.randint(0 , 1000 , (4,) ).long().to(_A ) for _ in range(4 )]

        # train with a DDPM scheduler
        _UpperCamelCase = self.get_model_optimizer(resolution=32 )
        model.train().to(_A )
        for i in range(4 ):
            optimizer.zero_grad()
            _UpperCamelCase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _UpperCamelCase = model(_A , timesteps[i] ).sample
            _UpperCamelCase = torch.nn.functional.mse_loss(_A , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        _UpperCamelCase = self.get_model_optimizer(resolution=32 )
        model.train().to(_A )
        for i in range(4 ):
            optimizer.zero_grad()
            _UpperCamelCase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _UpperCamelCase = model(_A , timesteps[i] ).sample
            _UpperCamelCase = torch.nn.functional.mse_loss(_A , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
        self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) )
714
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    # Return True if there is a node that has not been iterated.
    _UpperCamelCase = [False] * len(__snake_case )
    _UpperCamelCase = []
    queue.append(__snake_case )
    _UpperCamelCase = True

    while queue:
        _UpperCamelCase = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(__snake_case )
                _UpperCamelCase = True
                _UpperCamelCase = u

    return visited[t]


def _snake_case ( __snake_case , __snake_case , __snake_case ):
    # This array is filled by BFS to store the augmenting path
    _UpperCamelCase = [-1] * (len(__snake_case ))
    _UpperCamelCase = 0

    while bfs(__snake_case , __snake_case , __snake_case , __snake_case ):
        _UpperCamelCase = float('''Inf''' )
        _UpperCamelCase = sink

        while s != source:
            # Find the minimum value in the selected path
            _UpperCamelCase = min(__snake_case , graph[parent[s]][s] )
            _UpperCamelCase = parent[s]

        max_flow += path_flow
        _UpperCamelCase = sink

        while v != source:
            _UpperCamelCase = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            _UpperCamelCase = parent[v]

    return max_flow


_lowerCAmelCase = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

_lowerCAmelCase, _lowerCAmelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
71
0
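The capacity matrix at the bottom of the max-flow sample above is the classic CLRS example network, whose maximum flow is 23. A compact, self-contained Edmonds-Karp variant (my own naming, not the row's) that confirms this:

from collections import deque


def max_flow(capacity: list, source: int, sink: int) -> int:
    # Edmonds-Karp: repeatedly BFS for augmenting paths on a residual-capacity matrix
    n = len(capacity)
    flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return flow
        # Find the bottleneck along the path, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, capacity[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            capacity[parent[v]][v] -= bottleneck
            capacity[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert max_flow(graph, 0, 5) == 23  # classic CLRS network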
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            _UpperCamelCase = mf_knapsack(i - 1 , a__ , a__ , a__ )
        else:
            _UpperCamelCase = max(
                mf_knapsack(i - 1 , a__ , a__ , a__ ) ,
                mf_knapsack(i - 1 , a__ , a__ , j - wt[i - 1] ) + val[i - 1] ,
            )
        _UpperCamelCase = val
    return f[i][j]


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    _UpperCamelCase = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                _UpperCamelCase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                _UpperCamelCase = dp[i - 1][w_]

    return dp[n][w_], dp


def _snake_case ( __snake_case , __snake_case , __snake_case ):
    if not (isinstance(a__ , (list, tuple) ) and isinstance(a__ , (list, tuple) )):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''' )

    _UpperCamelCase = len(a__ )
    if num_items != len(a__ ):
        _UpperCamelCase = (
            '''The number of weights must be the same as the number of values.\n'''
            f"""But got {num_items} weights and {len(a__ )} values"""
        )
        raise ValueError(a__ )
    for i in range(a__ ):
        if not isinstance(wt[i] , a__ ):
            _UpperCamelCase = (
                '''All weights must be integers but got weight of '''
                f"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(a__ )

    _UpperCamelCase , _UpperCamelCase = knapsack(a__ , a__ , a__ , a__ )
    _UpperCamelCase = set()
    _construct_solution(a__ , a__ , a__ , a__ , a__ )

    return optimal_val, example_optional_set


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(a__ , a__ , i - 1 , a__ , a__ )
        else:
            optimal_set.add(a__ )
            _construct_solution(a__ , a__ , i - 1 , j - wt[i - 1] , a__ )


if __name__ == "__main__":
    _lowerCAmelCase = [3, 2, 4, 4]
    _lowerCAmelCase = [4, 3, 2, 3]
    _lowerCAmelCase = 4
    _lowerCAmelCase = 6
    _lowerCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    _lowerCAmelCase , _lowerCAmelCase = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    _lowerCAmelCase , _lowerCAmelCase = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
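The `__init__` sample above uses the lazy-module pattern: the heavy `modeling_unispeech` import is deferred until a model class is actually requested. A minimal standalone sketch of the same idea (not the real `_LazyModule` implementation, whose signature differs):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {module: [names]} into {name: module} for attribute lookup
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._class_to_module[attr])
        return getattr(module, attr)


# `json` is only imported when one of its names is first touched
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # triggers the real import of `json`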
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ : def __init__( self : Any , _A : Union[str, Any] , _A : Optional[Any]=13 , _A : List[Any]=32 , _A : Tuple=3 , _A : int=4 , _A : int=[10, 20, 30, 40] , _A : Union[str, Any]=[2, 2, 3, 2] , _A : Optional[Any]=True , _A : str=True , _A : str=37 , _A : Optional[int]="gelu" , _A : List[Any]=10 , _A : Any=0.02 , _A : str=["stage2", "stage3", "stage4"] , _A : Tuple=3 , _A : List[str]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = num_channels _UpperCamelCase = num_stages _UpperCamelCase = hidden_sizes _UpperCamelCase = depths _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = out_features _UpperCamelCase = num_labels _UpperCamelCase = scope _UpperCamelCase = num_stages def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : Tuple ): return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def UpperCamelCase_ ( self : str ): return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCAmelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCAmelCase_ , loss_ignore_index=255 , num_labels=self.num_labels , ) def UpperCamelCase_ ( self : Any , _A : Tuple , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = UperNetForSemanticSegmentation(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _UpperCamelCase = model(lowerCAmelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __lowerCAmelCase, __lowerCAmelCase, unittest.TestCase ): UpperCAmelCase = (UperNetForSemanticSegmentation,) if 
is_torch_available() else () UpperCAmelCase = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {} UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = UperNetModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def UpperCamelCase_ ( self : List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase_ ( self : Union[str, Any] ): return def UpperCamelCase_ ( self : int ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(lowerCAmelCase_ ) _UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ ) @unittest.skip(reason='''UperNet does not use inputs_embeds''' ) def UpperCamelCase_ ( self : List[str] ): pass @unittest.skip(reason='''UperNet does not support input and output embeddings''' ) def UpperCamelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason='''UperNet does not have a base model''' ) def UpperCamelCase_ ( self : int ): pass @unittest.skip(reason='''UperNet does not have a base model''' ) def UpperCamelCase_ ( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase_ ( self : Dict ): pass def UpperCamelCase_ ( self : Any ): def check_hidden_states_output(_A : Optional[int] , _A : str , _A : Union[str, Any] ): _UpperCamelCase = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase 
= True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase = _config_zero_init(lowerCAmelCase_ ) _UpperCamelCase = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _UpperCamelCase = model_class(config=lowerCAmelCase_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason='''UperNet does not have tied weights''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : Union[str, Any] ): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def _snake_case ( ): _UpperCamelCase = hf_hub_download( repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' ) _UpperCamelCase = Image.open(snake_case__ ).convert('''RGB''' ) return image @require_torch @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' ) _UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCAmelCase_ ) _UpperCamelCase = prepare_img() _UpperCamelCase = processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) with torch.no_grad(): _UpperCamelCase = model(**lowerCAmelCase_ ) _UpperCamelCase = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _UpperCamelCase = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' ) _UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCAmelCase_ ) _UpperCamelCase = prepare_img() _UpperCamelCase = processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) with torch.no_grad(): _UpperCamelCase = model(**lowerCAmelCase_ ) _UpperCamelCase = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _UpperCamelCase = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
716
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
def _snake_case ( __snake_case ): if not head: return True # split the list to two parts _UpperCamelCase , _UpperCamelCase = head.next, head while fast and fast.next: _UpperCamelCase = fast.next.next _UpperCamelCase = slow.next _UpperCamelCase = slow.next slow.next = None # Don't forget here! But forget still works! # reverse the second part _UpperCamelCase = None while second: _UpperCamelCase = second.next second.next = node _UpperCamelCase = second _UpperCamelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False _UpperCamelCase = node.next _UpperCamelCase = head.next return True def _snake_case ( __snake_case ): if not head or not head.next: return True # 1. Get the midpoint (slow) _UpperCamelCase = head while fast and fast.next: _UpperCamelCase , _UpperCamelCase = fast.next.next, slow.next # 2. Push the second half into the stack _UpperCamelCase = [slow.val] while slow.next: _UpperCamelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False _UpperCamelCase = cur.next return True def _snake_case ( __snake_case ): if not head or not head.next: return True _UpperCamelCase = {} _UpperCamelCase = 0 while head: if head.val in d: d[head.val].append(__snake_case ) else: _UpperCamelCase = [pos] _UpperCamelCase = head.next pos += 1 _UpperCamelCase = pos - 1 _UpperCamelCase = 0 for v in d.values(): if len(__snake_case ) % 2 != 0: middle += 1 else: _UpperCamelCase = 0 for i in range(0 , len(__snake_case ) ): if v[i] + v[len(__snake_case ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
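# Illustrative companion to the three palindrome checks above: they all assume a
# singly linked list node exposing `val` and `next`. The `ListNode` name and the
# `build_list` helper are assumptions for demonstration, not part of the source.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_list(values):
    # Chain nodes together: build_list([1, 2, 1]) -> 1 -> 2 -> 1
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head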
717
def _snake_case ( __snake_case , __snake_case , __snake_case ): if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod else: _UpperCamelCase = binary_exponentiation(__snake_case , n // 2 , __snake_case ) return (b * b) % mod # a prime number _lowerCAmelCase = 701 _lowerCAmelCase = 1_000_000_000 _lowerCAmelCase = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
71
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase = { """configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""], """feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""], """processing_wav2vec2""": ["""Wav2Vec2Processor"""], """tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ """WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Wav2Vec2ForAudioFrameClassification""", """Wav2Vec2ForCTC""", """Wav2Vec2ForMaskedLM""", """Wav2Vec2ForPreTraining""", """Wav2Vec2ForSequenceClassification""", """Wav2Vec2ForXVector""", """Wav2Vec2Model""", """Wav2Vec2PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ """TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWav2Vec2ForCTC""", """TFWav2Vec2Model""", """TFWav2Vec2PreTrainedModel""", """TFWav2Vec2ForSequenceClassification""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ """FlaxWav2Vec2ForCTC""", """FlaxWav2Vec2ForPreTraining""", """FlaxWav2Vec2Model""", """FlaxWav2Vec2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
718
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
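# A self-contained restatement of the low-pass builder above with readable names,
# shown as a minimal sketch: `lowpass_coefficients` is an illustrative name, and the
# claim that feeding its output to `IIRFilter(2).set_coefficients(a_coeffs, b_coeffs)`
# reproduces the first `_snake_case` definition is an assumption based on the code shown.
from math import cos, sin, sqrt, tau

def lowpass_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    # Same arithmetic as the first builder: a standard low-pass biquad design
    w0 = tau * frequency / samplerate
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    return [a0, a1, a2], [b0, b1, b0]  # note b2 == b0 for the low-pass case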
71
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowerCAmelCase = { "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["ChineseCLIPFeatureExtractor"] _lowerCAmelCase = ["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
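# A hedged usage sketch of the rope-scaling validation above. The upstream class this
# sample corresponds to is `transformers.GPTNeoXConfig`; the import path and the
# availability of the `rope_scaling` argument are assumptions that hold for recent
# transformers releases (>= 4.31), not facts stated in this sample.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# GPTNeoXConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # raises: type must be linear/dynamic
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises: factor must be > 1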
71
0
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowerCAmelCase = None _lowerCAmelCase = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowerCAmelCase = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class lowerCAmelCase_ : UpperCAmelCase = True UpperCAmelCase = None # Automatically constructed UpperCAmelCase = "PIL.Image.Image" UpperCAmelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) UpperCAmelCase = field(default="Image", init=_snake_case, repr=_snake_case ) def __call__( self : Union[str, Any] ): return self.pa_type def UpperCamelCase_ ( self : List[str] , _A : str ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(snake_case_ , snake_case_ ): _UpperCamelCase = np.array(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): return {"path": value, "bytes": None} elif isinstance(snake_case_ , snake_case_ ): return {"path": None, "bytes": value} elif isinstance(snake_case_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(snake_case_ ) elif isinstance(snake_case_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(snake_case_ ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" ) def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[Any]=None ): if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: _UpperCamelCase = {} _UpperCamelCase , _UpperCamelCase = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of \'path\' or \'bytes\' but both are None in {value}.""" ) else: if is_local_path(snake_case_ ): _UpperCamelCase = PIL.Image.open(snake_case_ ) else: _UpperCamelCase = path.split('''::''' )[-1] try: _UpperCamelCase = string_to_dict(snake_case_ , config.HUB_DATASETS_URL )["repo_id"] _UpperCamelCase = token_per_repo_id.get(snake_case_ ) except ValueError: _UpperCamelCase = None with xopen(snake_case_ , '''rb''' , use_auth_token=snake_case_ ) as f: _UpperCamelCase = BytesIO(f.read() ) _UpperCamelCase = PIL.Image.open(bytes_ ) else: _UpperCamelCase = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def UpperCamelCase_ ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def UpperCamelCase_ ( self : List[str] , _A : Dict ): if pa.types.is_string(storage.type ): _UpperCamelCase = pa.array([None] * len(snake_case_ ) , type=pa.binary() ) _UpperCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _UpperCamelCase = pa.array([None] * len(snake_case_ ) , type=pa.string() ) _UpperCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: _UpperCamelCase = storage.field('''bytes''' ) else: _UpperCamelCase = pa.array([None] * len(snake_case_ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: _UpperCamelCase = storage.field('''path''' ) else: _UpperCamelCase = pa.array([None] * len(snake_case_ ) , type=pa.string() ) _UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _UpperCamelCase = pa.array( [encode_np_array(np.array(snake_case_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _UpperCamelCase = pa.array([None] * len(snake_case_ ) , type=pa.string() ) _UpperCamelCase = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(snake_case_ , self.pa_type ) def UpperCamelCase_ ( self : Optional[int] , _A : Dict ): @no_op_if_value_is_null def path_to_bytes(_A : Optional[Any] ): with xopen(snake_case_ , '''rb''' ) as f: _UpperCamelCase = f.read() return bytes_ _UpperCamelCase = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _UpperCamelCase = pa.array( [os.path.basename(snake_case_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) _UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(snake_case_ , self.pa_type ) def _snake_case ( ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install 
\'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _UpperCamelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def _snake_case ( __snake_case ): _UpperCamelCase = BytesIO() if image.format in list_image_compression_formats(): _UpperCamelCase = image.format else: _UpperCamelCase = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(__snake_case , format=__snake_case ) return buffer.getvalue() def _snake_case ( __snake_case ): if hasattr(__snake_case , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(__snake_case )} def _snake_case ( __snake_case ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) _UpperCamelCase = array.dtype _UpperCamelCase = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER _UpperCamelCase = dtype.kind _UpperCamelCase = dtype.itemsize _UpperCamelCase = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _UpperCamelCase = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _UpperCamelCase = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _UpperCamelCase = dtype_byteorder + dtype_kind + str(__snake_case ) _UpperCamelCase = np.dtype(__snake_case ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) _UpperCamelCase = PIL.Image.fromarray(array.astype(__snake_case ) ) return {"path": None, "bytes": image_to_bytes(__snake_case )} def _snake_case ( __snake_case ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: _UpperCamelCase = first_non_null_value(__snake_case ) if isinstance(__snake_case , __snake_case ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(__snake_case , np.ndarray ): _UpperCamelCase = no_op_if_value_is_null(__snake_case ) return [obj_to_image_dict_func(__snake_case ) for obj in objs] elif isinstance(__snake_case , PIL.Image.Image ): _UpperCamelCase = no_op_if_value_is_null(__snake_case ) return [obj_to_image_dict_func(__snake_case ) for obj in objs] else: return objs else: return objs
720
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__lowercase ): UpperCAmelCase = ["keras_nlp"] def __init__( self : Any , *_A : Dict , **_A : List[str] ): requires_backends(self , ['''keras_nlp'''] )
71
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCAmelCase_ ( lowercase__ ): def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase__ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(UpperCAmelCase__ , '''depth_multiplier''' ) ) class lowerCAmelCase_ : def __init__( self : int , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Any=3 , _A : Union[str, Any]=32 , _A : Union[str, Any]=0.25 , _A : List[Any]=8 , _A : Union[str, Any]=8 , _A : str=6 , _A : int=32 , _A : Optional[Any]=True , _A : Union[str, Any]=True , _A : int=True , _A : Optional[int]="relu6" , _A : List[Any]=1280 , _A : Union[str, Any]=0.1 , _A : Optional[Any]=0.02 , _A : str=True , _A : List[str]=True , _A : Union[str, Any]=10 , _A : Tuple=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = depth_multiplier _UpperCamelCase = depth_divisible_by _UpperCamelCase = min_depth _UpperCamelCase = expand_ratio _UpperCamelCase = tf_padding _UpperCamelCase = output_stride _UpperCamelCase = first_layer_is_expansion _UpperCamelCase = finegrained_output _UpperCamelCase = hidden_act _UpperCamelCase = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _UpperCamelCase = classifier_dropout_prob _UpperCamelCase = use_labels _UpperCamelCase = is_training _UpperCamelCase = num_labels _UpperCamelCase = initializer_range _UpperCamelCase = scope def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase_ ( self : Optional[int] ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : Union[str, Any] , _A : int , _A : Dict ): _UpperCamelCase = MobileNetVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _UpperCamelCase = 
model(UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def UpperCamelCase_ ( self : Dict , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : Union[str, Any] ): _UpperCamelCase = self.num_labels _UpperCamelCase = MobileNetVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _UpperCamelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Optional[int] , _A : List[Any] , _A : Any ): _UpperCamelCase = self.num_labels _UpperCamelCase = MobileNetVaForSemanticSegmentation(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _UpperCamelCase = model(UpperCAmelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _UpperCamelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase__, lowercase__, unittest.TestCase ): UpperCAmelCase = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCAmelCase = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = MobileNetVaModelTester(self ) _UpperCamelCase = MobileNetVaConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ ) def UpperCamelCase_ ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def UpperCamelCase_ ( self : int ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def UpperCamelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def UpperCamelCase_ ( self : Optional[Any] ): pass def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(UpperCAmelCase__ ) _UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ): def 
check_hidden_states_output(_A : Optional[Any] , _A : Any , _A : Dict ): _UpperCamelCase = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): _UpperCamelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = 16 self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ ) @slow def UpperCamelCase_ ( self : int ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = MobileNetVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _snake_case ( ): _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : Union[str, Any] ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(UpperCAmelCase__ ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): _UpperCamelCase = model(**UpperCAmelCase__ ) # verify the logits _UpperCamelCase = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) ) @slow def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) _UpperCamelCase = model.to(UpperCAmelCase__ ) _UpperCamelCase = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): _UpperCamelCase = model(**UpperCAmelCase__ ) _UpperCamelCase = outputs.logits # verify the logits _UpperCamelCase = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , UpperCAmelCase__ ) _UpperCamelCase = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=UpperCAmelCase__ , ) 
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
721
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
71
0
'''simple docstring''' _lowerCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" _lowerCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}] _lowerCAmelCase = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
700
from sklearn.metrics import mean_squared_error import datasets _lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def UpperCamelCase_ ( self : Dict ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ): _UpperCamelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
71
0
from torch import nn class lowerCAmelCase_ ( nn.Module ): def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any] ): super().__init__() _UpperCamelCase = class_size _UpperCamelCase = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) _UpperCamelCase = nn.Linear(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase_ ( self : Tuple , _A : Dict ): _UpperCamelCase = self.mlp(__lowerCamelCase ) return logits
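# A minimal sketch of what the head above computes: a single linear projection
# from pooled hidden states to class logits. The sizes below are illustrative
# assumptions.
import torch
from torch import nn

head = nn.Linear(768, 2)              # embed_size -> class_size, as in the class above
hidden_states = torch.randn(4, 768)   # a batch of 4 pooled embeddings
logits = head(hidden_states)
print(logits.shape)                   # torch.Size([4, 2])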
701
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowerCAmelCase = logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): UpperCAmelCase = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : str , **_A : Union[str, Any] ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _UpperCamelCase = deprecated_arg[3:] _UpperCamelCase = not kwargs.pop(_lowercase ) logger.warning( F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) _UpperCamelCase = kwargs.pop('''tpu_name''' , self.tpu_name ) _UpperCamelCase = kwargs.pop('''device_idx''' , self.device_idx ) _UpperCamelCase = kwargs.pop('''eager_mode''' , self.eager_mode ) _UpperCamelCase = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**_lowercase ) UpperCAmelCase = field( default=UpperCAmelCase_, metadata={"help": "Name of TPU"}, ) UpperCAmelCase = field( default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, ) UpperCAmelCase = field(default=UpperCAmelCase_, metadata={"help": "Benchmark models in eager mode."} ) UpperCAmelCase = field( default=UpperCAmelCase_, metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`." }, ) @cached_property def UpperCamelCase_ ( self : Optional[Any] ): requires_backends(self , ['''tf'''] ) _UpperCamelCase = None if self.tpu: try: if self.tpu_name: _UpperCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _UpperCamelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _UpperCamelCase = None return tpu @cached_property def UpperCamelCase_ ( self : Dict ): requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _UpperCamelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) _UpperCamelCase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU _UpperCamelCase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def UpperCamelCase_ ( self : List[Any] ): requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def UpperCamelCase_ ( self : Optional[Any] ): requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def UpperCamelCase_ ( self : str ): requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def UpperCamelCase_ ( self : Any ): requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase_ ( self : Dict ): return self.n_gpu > 0
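# A minimal construction sketch, assuming the dataclass above mirrors
# transformers' TensorFlowBenchmarkArguments and that `tensorflow` is installed.
from transformers import TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
print(args.eager_mode, args.n_gpu)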
702
from __future__ import annotations import math class lowerCAmelCase_ : def __init__( self : int , _A : int ): _UpperCamelCase = size # approximate the overall size of segment tree with given value _UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update _UpperCamelCase = [0 for i in range(0 , 4 * size )] _UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCamelCase_ ( self : str , _A : int ): return idx * 2 def UpperCamelCase_ ( self : Any , _A : int ): return idx * 2 + 1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ): if left_element == right_element: _UpperCamelCase = a[left_element - 1] else: _UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_A ) , _A , _A , _A ) self.build(self.right(_A ) , mid + 1 , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: _UpperCamelCase = val if left_element != right_element: _UpperCamelCase = val _UpperCamelCase = val _UpperCamelCase = True _UpperCamelCase = True return True _UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_A ) , _A , _A , _A , _A , _A ) self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) return True def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] _UpperCamelCase = (left_element + right_element) // 2 _UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A ) _UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A ) return max(_A , _A ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCAmelCase = 15 _lowerCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
71
0
from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _snake_case ( __snake_case , __snake_case , __snake_case ): _UpperCamelCase = hf_hub_url(repo_id=__SCREAMING_SNAKE_CASE , path=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE ) assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(__SCREAMING_SNAKE_CASE )}"""
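# Concretely, the URL built by `hf_hub_url` looks like this (assumes `datasets`
# is installed; the signature matches the parametrized test above):
from datasets.utils.hub import hf_hub_url

print(hf_hub_url(repo_id="org-name/dataset-name", path="filename.csv"))
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename.csv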
703
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Image-classification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) _lowerCAmelCase = OrderedDict( [ ("vision-encoder-decoder",
"FlaxVisionEncoderDecoderModel"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) _lowerCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) _lowerCAmelCase = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) _lowerCAmelCase = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) _lowerCAmelCase = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) _lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _lowerCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModel) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = 
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _lowerCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class lowerCAmelCase_ ( _BaseAutoModelClass ): UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _lowerCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
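# A typical entry point for the auto classes defined above, sketched under the
# assumption that `transformers` with Flax/JAX is installed and the checkpoint
# ships Flax weights (a freshly initialized classification head is expected):
from transformers import FlaxAutoModelForSequenceClassification

model = FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")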
704
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. 
Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
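# These reader/writer classes back `Dataset.from_json` / `Dataset.to_json`;
# a minimal round trip through that public API (assumes `datasets` is installed):
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("tmp_out.jsonl")                    # writer path (JSON Lines by default)
reloaded = Dataset.from_json("tmp_out.jsonl")  # reader path
print(reloaded[0])  # {'a': 1}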
71
0
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__lowercase ): UpperCAmelCase = ["transformers", "torch", "note_seq"] def __init__( self : int , *_A : List[str] , **_A : str ): requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase_ ( cls : Tuple , *_A : List[str] , **_A : Any ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase_ ( cls : Optional[Any] , *_A : Dict , **_A : Tuple ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
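# This pipeline class is normally reached through the `pipeline` factory; a
# minimal sketch (assumes `transformers` plus a torch or TF backend, and
# network access to download gpt2):
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I am", max_new_tokens=10)[0]["generated_text"])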
71
0
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) _UpperCamelCase = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ), ] , ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = str(_SCREAMING_SNAKE_CASE ) dataset_info.write_to_directory(_SCREAMING_SNAKE_CASE ) _UpperCamelCase = DatasetInfo.from_directory(_SCREAMING_SNAKE_CASE ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , '''dataset_info.json''' ) ) def _snake_case ( ): _UpperCamelCase = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCamelCase = dataset_info._to_yaml_dict() assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCamelCase = yaml.safe_dump(_SCREAMING_SNAKE_CASE ) _UpperCamelCase = yaml.safe_load(_SCREAMING_SNAKE_CASE ) assert dataset_info_yaml_dict == reloaded def _snake_case ( ): _UpperCamelCase = DatasetInfo() _UpperCamelCase = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=1337 ), } ), ] , ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = str(_SCREAMING_SNAKE_CASE ) 
dataset_infos_dict.write_to_directory(_SCREAMING_SNAKE_CASE ) _UpperCamelCase = DatasetInfosDict.from_directory(_SCREAMING_SNAKE_CASE ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCamelCase = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCamelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , '''README.md''' ) )
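# A standalone round trip of the write/read path the tests above exercise
# (assumes `datasets` is installed):
import tempfile

from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)
    print(DatasetInfo.from_directory(tmp_dir).dataset_size)  # 42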
706
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss _UpperCamelCase = -(labels.shape[-1] * loss.item()) _UpperCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
71
0
from __future__ import annotations def _snake_case ( __snake_case , __snake_case = None ): _UpperCamelCase = word_bank or [] # create a table _UpperCamelCase = len(__snake_case ) + 1 _UpperCamelCase = [] for _ in range(__snake_case ): table.append([] ) # seed value _UpperCamelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(__snake_case ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(__snake_case )] == word: _UpperCamelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(__snake_case )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(__snake_case )]: combination.reverse() return table[len(__snake_case )] if __name__ == "__main__": print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])) print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"])) print( all_construct( "hexagonosaurus", ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"], ) )
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples.
-1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
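# A hypothetical invocation of this script. The filename, checkpoint, and data
# directory are assumptions; the flag names follow the dataclass fields and
# metadata above (language flags apply to translation tasks):
# python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./wmt_en_ro \
#     --output_dir ./output \
#     --do_train --do_eval \
#     --task translation --src_lang en --tgt_lang ro \
#     --predict_with_generate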
71
0
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _lowerCAmelCase = logging.get_logger(__name__) def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ): _UpperCamelCase = round(val / multiple ) * multiple if max_val is not None and x > max_val: _UpperCamelCase = math.floor(val / multiple ) * multiple if x < min_val: _UpperCamelCase = math.ceil(val / multiple ) * multiple return x _UpperCamelCase = (output_size, output_size) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else output_size _UpperCamelCase = get_image_size(lowerCAmelCase__ ) _UpperCamelCase = output_size # determine new height and width _UpperCamelCase = output_height / input_height _UpperCamelCase = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _UpperCamelCase = scale_width else: # fit height _UpperCamelCase = scale_height _UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=lowerCAmelCase__ ) _UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=lowerCAmelCase__ ) return (new_height, new_width) class lowerCAmelCase_ ( __A ): UpperCAmelCase = ["""pixel_values"""] def __init__( self : List[Any] , _A : List[Any] = True , _A : Any = None , _A : str = PILImageResampling.BILINEAR , _A : Optional[int] = False , _A : str = 1 , _A : List[str] = True , _A : Optional[int] = 1 / 255 , _A : Dict = True , _A : Tuple = None , _A : Optional[int] = None , **_A : Tuple , ): super().__init__(**UpperCamelCase__ ) _UpperCamelCase = size if size is not None else {'height': 384, 'width': 384} _UpperCamelCase = get_size_dict(UpperCamelCase__ ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = keep_aspect_ratio _UpperCamelCase = ensure_multiple_of _UpperCamelCase = resample _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase_ ( self : Dict , _A : Dict , _A : Union[str, Any] , _A : int = False , _A : List[str] = 1 , _A : Tuple = PILImageResampling.BICUBIC , _A : Optional[Any] = None , **_A : List[Any] , ): _UpperCamelCase = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}""" ) _UpperCamelCase = get_resize_output_image_size( UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCamelCase_ ( self : List[str] , _A : List[Any] , _A : Optional[int] , _A : Dict = None , **_A : Dict , ): return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCamelCase_ ( self : int , _A : int , _A : Dict , _A : Dict , _A : Optional[Any] = None , **_A : Tuple , ): return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def UpperCamelCase_ ( self : Any , _A : str , _A : Tuple = None , _A : int = None , _A : Tuple = None , _A : str = None , _A : Dict = None , _A : List[str] = None , _A : Optional[Any] = None , _A : Tuple = None , _A : Dict = None , _A : Union[str, Any] = None , _A : List[str] = None , _A : List[str] = ChannelDimension.FIRST , **_A : List[Any] , ): _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(UpperCamelCase__ ) _UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] _UpperCamelCase = {'pixel_values': images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ ) def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : List[Any] = None ): _UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(UpperCamelCase__ ): _UpperCamelCase = target_sizes.numpy() _UpperCamelCase = [] for idx in range(len(UpperCamelCase__ ) ): _UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ ) _UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCamelCase__ ) else: _UpperCamelCase = logits.argmax(dim=1 ) _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
708
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    # Count, for each perimeter <= max_perimeter, how many right triangles
    # with integer sides have that perimeter.
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
71
0
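# Note: standalone illustration of the multiple-of constraint used by the DPT
# resize helper above. This mirrors the logic of constraint_to_multiple_of as a
# sketch; the function name here is hypothetical, not the HF API:
import math


def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


print(constrain_to_multiple_of(380, 32))               # 384
print(constrain_to_multiple_of(380, 32, max_val=380))  # 352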
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( nn.Module ): def __init__( self : int , _A : Union[str, Any] , _A : List[Any] , _A : Optional[int] = 3 , _A : str = 1 , _A : str = 1 , _A : Optional[int] = "relu" , ): super().__init__() _UpperCamelCase = nn.Convad( lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , groups=lowerCamelCase_ , bias=lowerCamelCase_ , ) _UpperCamelCase = nn.BatchNormad(lowerCamelCase_ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def UpperCamelCase_ ( self : List[str] , _A : Tuple ): _UpperCamelCase = self.convolution(lowerCamelCase_ ) _UpperCamelCase = self.normalization(lowerCamelCase_ ) _UpperCamelCase = self.activation(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : int ): super().__init__() _UpperCamelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = config.num_channels def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int = 2 ): super().__init__() _UpperCamelCase = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ ) _UpperCamelCase = nn.BatchNormad(lowerCamelCase_ ) def UpperCamelCase_ ( self : Dict , _A : int ): _UpperCamelCase = self.convolution(lowerCamelCase_ ) _UpperCamelCase = self.normalization(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : int , _A : Dict , _A : Optional[Any] ): super().__init__() _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) _UpperCamelCase = nn.Sequential( nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.Sigmoid() , ) def UpperCamelCase_ ( self : str , _A : Optional[int] ): # b c h w -> b c 1 1 _UpperCamelCase = self.pooler(lowerCamelCase_ ) _UpperCamelCase = self.attention(lowerCamelCase_ ) _UpperCamelCase = hidden_state * attention return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( 
self : Tuple , _A : List[str] , _A : str , _A : Any , _A : List[Any] = 1 ): super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , ) _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : str , _A : List[Any] ): _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCamelCase_ ) _UpperCamelCase = self.shortcut(lowerCamelCase_ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : Any , _A : Dict , _A : Optional[int] , _A : Tuple , _A : Optional[int] = 1 ): super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , ) _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Any , _A : Union[str, Any] ): _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCamelCase_ ) _UpperCamelCase = self.shortcut(lowerCamelCase_ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : Any , _A : Optional[Any] , _A : Optional[int] , _A : Dict = 2 , _A : Union[str, Any] = 2 , ): super().__init__() _UpperCamelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , ) , *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for _ in range(depth - 1 )] , ) def UpperCamelCase_ ( self : Any , _A : int ): _UpperCamelCase = self.layers(lowerCamelCase_ ) return hidden_state class lowerCAmelCase_ ( nn.Module ): def __init__( self : str , _A : Optional[int] ): super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCamelCase_ , config.depths[1:] ): 
self.stages.append(RegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ ) ) def UpperCamelCase_ ( self : List[Any] , _A : str , _A : Any = False , _A : Any = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCamelCase_ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ ) class lowerCAmelCase_ ( a__ ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" UpperCAmelCase = True def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, Any] ): if isinstance(lowerCamelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def UpperCamelCase_ ( self : str , _A : int , _A : List[str]=False ): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): _UpperCamelCase = value _lowerCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", a__, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowerCAmelCase_ ( a__ ): def __init__( self : Union[str, Any] , _A : Tuple ): super().__init__(lowerCamelCase_ ) _UpperCamelCase = config _UpperCamelCase = RegNetEmbeddings(lowerCamelCase_ ) _UpperCamelCase = RegNetEncoder(lowerCamelCase_ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Any = None , _A : Dict = None ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCamelCase_ ) _UpperCamelCase = self.encoder( lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCamelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ", a__, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowerCAmelCase_ ( a__ ): def __init__( self : Optional[int] , _A : str ): super().__init__(lowerCamelCase_ ) _UpperCamelCase = config.num_labels _UpperCamelCase = RegNetModel(lowerCamelCase_ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCamelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : List[str] , _A : List[Any] = None , _A : Any = None , _A : Any = None , _A : Optional[Any] = None , ): _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCamelCase_ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCamelCase_ , lowerCamelCase_ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
709
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DPMSolverSDEScheduler,) UpperCAmelCase = 10 def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ): _UpperCamelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_A ) return config def UpperCamelCase_ ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : List[Any] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCamelCase_ ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = 
self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
71
0
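# Note: the RegNet layers above are all built from the same conv + norm +
# activation pattern. A simplified, self-contained stand-in (not the HF
# RegNetConvLayer class itself):
import torch
from torch import nn


class ConvNormAct(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, hidden_state):
        return self.activation(self.normalization(self.convolution(hidden_state)))


block = ConvNormAct(3, 32, stride=2)
print(block(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 32, 32, 32])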
def actual_power(a: int, b: int):
    # Divide-and-conquer exponentiation. Note int() truncates toward zero, so
    # the recursion also terminates for negative exponents.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
710
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
71
0
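# Note: the block tests above pass a manually seeded generator into the tensor
# helpers so results are deterministic. The same idea in bare torch, as a
# self-contained sanity check (illustrative only):
import torch

gen = torch.Generator().manual_seed(0)
a = torch.randn((2, 3), generator=gen)
gen = torch.Generator().manual_seed(0)
b = torch.randn((2, 3), generator=gen)
assert torch.equal(a, b)  # identical seeds give identical samples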
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    # Map a string identifier to the corresponding torch activation module.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
711
def get_highest_set_bit_position(number: int) -> int:
    # Position of the highest set bit, counted from 1 (0 for input 0).
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
71
0
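# Note: for non-negative integers the shift loop above computes exactly what
# Python's built-in int.bit_length() returns; a quick cross-check:
for n in [0, 1, 2, 255, 1 << 20]:
    position, m = 0, n
    while m:
        position += 1
        m >>= 1
    assert position == n.bit_length()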
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( _UpperCamelCase ): UpperCAmelCase = ["image_processor", "tokenizer"] UpperCAmelCase = "ViTImageProcessor" UpperCAmelCase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : Tuple , _A : Optional[int]=None , _A : str=None , **_A : Dict ): _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _UpperCAmelCase , ) _UpperCamelCase = kwargs.pop('''feature_extractor''' ) _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __call__( self : Optional[Any] , _A : Dict=None , _A : List[str]=None , _A : List[str]=None , _A : int=None , **_A : Any ): if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: _UpperCamelCase = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if visual_prompt is not None: _UpperCamelCase = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if images is not None: _UpperCamelCase = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) if visual_prompt is not None and images is not None: _UpperCamelCase = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: _UpperCamelCase = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase ) def UpperCamelCase_ ( self : int , *_A : List[str] , **_A : Any ): return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def UpperCamelCase_ ( self : str , *_A : Dict , **_A : List[str] ): return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def UpperCamelCase_ ( self : Any ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _UpperCAmelCase , ) return self.image_processor_class @property def UpperCamelCase_ ( self : str ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _UpperCAmelCase , ) return self.image_processor
712
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
71
0
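# Note: the conversion script above is, at its core, a key-renaming pass over a
# fairseq state dict. A stripped-down sketch of that idea; the single mapping
# entry is copied from the MAPPING table above, everything else is schematic:
MAPPING = {"w2v_model.layer_norm": "feature_projection.layer_norm"}


def rename_keys(state_dict, mapping):
    renamed, unused = {}, []
    for name, value in state_dict.items():
        for old, new in mapping.items():
            if old in name:
                renamed[name.replace(old, new)] = value
                break
        else:
            unused.append(name)
    return renamed, unused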
def odd_even_transposition(arr: list) -> list:
    # Brick sort: alternately compare (even, odd) and (odd, even) index pairs.
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
713
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
71
0
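# Note: property check for the transposition (brick) sort above; on random
# inputs it must agree with sorted(). The algorithm is restated here so the
# snippet runs on its own:
import random


def brick_sort(arr):
    for step in range(len(arr)):
        for i in range(step % 2, len(arr) - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert brick_sort(data[:]) == sorted(data)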
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to place a queen in
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that no queen in the current board (possible_board) shares
        # this column, which would be a vertical collision. Then we apply the two
        # diagonal formulas:
        #
        #    45º: row - col = b
        #   135º: row + col = b
        #
        # and verify that neither result already appears in
        # diagonal_right_collisions / diagonal_left_collisions respectively.
        # If any of these checks fail there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we recurse with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
714
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t, filling parent[]
    # along the way.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
71
0
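# Note: cross-check for the backtracking N-queens above; counting solutions
# with the same column/diagonal bookkeeping should reproduce the known
# sequence 1, 0, 0, 2, 10, 4, 40, 92 for n = 1..8:
def count_queens(n, cols=(), right=(), left=()):
    row = len(cols)
    if row == n:
        return 1
    return sum(
        count_queens(n, (*cols, c), (*right, row - c), (*left, row + c))
        for c in range(n)
        if c not in cols and row - c not in right and row + c not in left
    )


assert [count_queens(n) for n in range(1, 9)] == [1, 0, 0, 2, 10, 4, 40, 92]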
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=1024 ): _UpperCamelCase , _UpperCamelCase = [], [] _UpperCamelCase = list(zip(_UpperCamelCase , _UpperCamelCase ) ) _UpperCamelCase , _UpperCamelCase = sorted_examples[0] def is_too_big(__snake_case ): return tok(_UpperCamelCase , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): _UpperCamelCase = new_src + ''' ''' + src _UpperCamelCase = new_tgt + ''' ''' + tgt if is_too_big(_UpperCamelCase ) or is_too_big(_UpperCamelCase ): # cant fit, finalize example finished_src.append(_UpperCamelCase ) finished_tgt.append(_UpperCamelCase ) _UpperCamelCase , _UpperCamelCase = src, tgt else: # can fit, keep adding _UpperCamelCase , _UpperCamelCase = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(_UpperCamelCase ) finished_tgt.append(_UpperCamelCase ) return finished_src, finished_tgt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = Path(_UpperCamelCase ) save_path.mkdir(exist_ok=_UpperCamelCase ) for split in ["train"]: _UpperCamelCase , _UpperCamelCase = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" _UpperCamelCase = [x.rstrip() for x in Path(_UpperCamelCase ).open().readlines()] _UpperCamelCase = [x.rstrip() for x in Path(_UpperCamelCase ).open().readlines()] _UpperCamelCase , _UpperCamelCase = pack_examples(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) print(f"""packed {split} split from {len(_UpperCamelCase )} examples -> {len(_UpperCamelCase )}.""" ) Path(save_path / f"""{split}.source""" ).open('''w''' ).write('''\n'''.join(_UpperCamelCase ) ) Path(save_path / f"""{split}.target""" ).open('''w''' ).write('''\n'''.join(_UpperCamelCase ) ) for split in ["val", "test"]: _UpperCamelCase , _UpperCamelCase = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(_UpperCamelCase , save_path / f"""{split}.source""" ) shutil.copyfile(_UpperCamelCase , save_path / f"""{split}.target""" ) def _snake_case ( ): _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=_UpperCamelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=_UpperCamelCase , default=128 ) parser.add_argument('''--data_dir''' , type=_UpperCamelCase ) parser.add_argument('''--save_path''' , type=_UpperCamelCase ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(_UpperCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}


class lowerCAmelCase_ ( __UpperCAmelCase ):
    UpperCAmelCase = "roberta"

    def __init__( self : Tuple , _A : Tuple=5_0265 , _A : Dict=768 , _A : Optional[int]=12 , _A : int=12 , _A : Optional[int]=3072 , _A : Optional[int]="gelu" , _A : Any=0.1 , _A : int=0.1 , _A : str=512 , _A : Tuple=2 , _A : Union[str, Any]=0.02 , _A : Dict=1e-12 , _A : Optional[int]=1 , _A : str=0 , _A : Optional[int]=2 , _A : Any="absolute" , _A : Optional[Any]=True , _A : str=None , **_A : Any , ):
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = hidden_act
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = position_embedding_type
        _UpperCamelCase = use_cache
        _UpperCamelCase = classifier_dropout


class lowerCAmelCase_ ( __UpperCAmelCase ):
    @property
    def UpperCamelCase_ ( self : Optional[Any] ):
        if self.task == "multiple-choice":
            _UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            _UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
716
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = tempfile.mkdtemp()
        # fmt: off
        _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _UpperCamelCase = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        _UpperCamelCase = os.path.join(self.tmpdirname , _A )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(_A , _A )

    def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **_A )

    def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )

    def UpperCamelCase_ ( self : int ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self : Optional[int] ):
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        processor.save_pretrained(self.tmpdirname )
        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = image_processor(_A , return_tensors='''np''' )
        _UpperCamelCase = processor(images=_A , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        _UpperCamelCase = '''lower newer'''
        _UpperCamelCase = processor(text=_A )
        _UpperCamelCase = tokenizer(_A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        _UpperCamelCase = '''lower newer'''
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(text=_A , images=_A )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(_A ):
            processor()

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _UpperCamelCase = processor.batch_decode(_A )
        _UpperCamelCase = tokenizer.batch_decode(_A )
        self.assertListEqual(_A , _A )

    def UpperCamelCase_ ( self : List[str] ):
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
        _UpperCamelCase = '''lower newer'''
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(text=_A , images=_A )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class lowerCAmelCase_ ( __lowerCAmelCase ):
    UpperCAmelCase = "markuplm"

    def __init__( self : List[Any] , _A : Optional[int]=3_0522 , _A : Dict=768 , _A : List[str]=12 , _A : List[Any]=12 , _A : Optional[Any]=3072 , _A : int="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Tuple=512 , _A : List[str]=2 , _A : Union[str, Any]=0.02 , _A : Optional[Any]=1e-12 , _A : List[str]=0 , _A : Tuple=0 , _A : List[str]=2 , _A : List[Any]=256 , _A : List[str]=1024 , _A : Optional[int]=216 , _A : Any=1001 , _A : int=32 , _A : Tuple=50 , _A : str="absolute" , _A : List[str]=True , _A : List[Any]=None , **_A : List[str] , ):
        super().__init__(
            pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = hidden_act
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = position_embedding_type
        _UpperCamelCase = use_cache
        _UpperCamelCase = classifier_dropout
        # additional properties
        _UpperCamelCase = max_depth
        _UpperCamelCase = max_xpath_tag_unit_embeddings
        _UpperCamelCase = max_xpath_subs_unit_embeddings
        _UpperCamelCase = tag_pad_id
        _UpperCamelCase = subs_pad_id
        _UpperCamelCase = xpath_unit_hidden_size
717
def _snake_case ( __snake_case , __snake_case , __snake_case ):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod
    else:
        _UpperCamelCase = binary_exponentiation(__snake_case , n // 2 , __snake_case )
        return (b * b) % mod


# a prime number
_lowerCAmelCase = 701

_lowerCAmelCase = 1_000_000_000
_lowerCAmelCase = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
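# Hedged illustration: the recursion above is square-and-multiply. A
# self-contained iterative equivalent (names are illustrative, not from this file):
def _modpow_demo(base: int, exponent: int, modulus: int) -> int:
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent % 2 == 1:  # odd exponent: peel off one factor of base
            result = result * base % modulus
        base = base * base % modulus  # square the base, halve the exponent
        exponent //= 2
    return result

# e.g. _modpow_demo(10, 699, 701) == pow(10, 699, 701), the modular inverse of
# 10 modulo the prime 701 by Fermat's little theorem, matching the demo above.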
71
0
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester

if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def _snake_case ( __snake_case ):
    if isinstance(snake_case_ , collections.abc.Iterable ):
        return x
    return (x, x)


@require_flax
class lowerCAmelCase_ :
    def UpperCamelCase_ ( self : Optional[Any] , _A : List[Any] , _A : str ):
        pass

    def UpperCamelCase_ ( self : List[str] ):
        pass

    def UpperCamelCase_ ( self : str ):
        pass

    def UpperCamelCase_ ( self : Dict , _A : str , _A : Optional[Any] , _A : str ):
        _UpperCamelCase = np.abs((a - b) ).max()
        self.assertLessEqual(_A , _A , F"""Difference between torch and flax is {diff} (>= {tol}).""" )

    def UpperCamelCase_ ( self : List[str] , _A : int , _A : Any , _A : str , _A : Optional[int] , _A : Tuple=None , **_A : Any ):
        _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
        _UpperCamelCase = FlaxVisionTextDualEncoderModel(_A )
        _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )

    def UpperCamelCase_ ( self : Any , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : Optional[int]=None , **_A : List[Any] ):
        _UpperCamelCase = self.get_vision_text_model(_A , _A )
        _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
        _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def UpperCamelCase_ ( self : Optional[int] , _A : Optional[int] , _A : str , _A : str , _A : int , _A : str=None , **_A : List[str] ):
        _UpperCamelCase = self.get_vision_text_model(_A , _A )
        _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
        _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        _UpperCamelCase = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_A )
            _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
            _UpperCamelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
            _UpperCamelCase = after_output[0]
            _UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_A , 1e-3 )

    def UpperCamelCase_ ( self : int , _A : int , _A : Optional[int] , _A : Dict , _A : Optional[Any] , _A : Any=None , **_A : List[Any] ):
        _UpperCamelCase = self.get_vision_text_model(_A , _A )
        _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
        _UpperCamelCase = model(
            input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
        _UpperCamelCase = output.vision_model_output.attentions
        self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        _UpperCamelCase = to_atuple(vision_model.config.image_size )
        _UpperCamelCase = to_atuple(vision_model.config.patch_size )
        _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _UpperCamelCase = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _UpperCamelCase = output.text_model_output.attentions
        self.assertEqual(len(_A ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def UpperCamelCase_ ( self : Any , _A : Dict , _A : Any , _A : Any ):
        pt_model.to(_A )
        pt_model.eval()
        # prepare inputs
        _UpperCamelCase = inputs_dict
        _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            _UpperCamelCase = pt_model(**_A ).to_tuple()
        _UpperCamelCase = fx_model(**_A ).to_tuple()
        self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_A )
            _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A )
        _UpperCamelCase = fx_model_loaded(**_A ).to_tuple()
        self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_A )
            _UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A )
        pt_model_loaded.to(_A )
        pt_model_loaded.eval()
        with torch.no_grad():
            _UpperCamelCase = pt_model_loaded(**_A ).to_tuple()
        self.assertEqual(len(_A ) , len(_A ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2 )

    def UpperCamelCase_ ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : List[str] ):
        _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
        _UpperCamelCase = VisionTextDualEncoderModel(_A )
        _UpperCamelCase = FlaxVisionTextDualEncoderModel(_A )
        _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
        _UpperCamelCase = fx_state
        self.check_pt_flax_equivalence(_A , _A , _A )

    def UpperCamelCase_ ( self : List[Any] , _A : str , _A : Dict , _A : Tuple ):
        _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
        _UpperCamelCase = VisionTextDualEncoderModel(_A )
        _UpperCamelCase = FlaxVisionTextDualEncoderModel(_A )
        _UpperCamelCase = load_flax_weights_in_pytorch_model(_A , fx_model.params )
        self.check_pt_flax_equivalence(_A , _A , _A )

    def UpperCamelCase_ ( self : str ):
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_A )

    def UpperCamelCase_ ( self : str ):
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_A )

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_save_load(**_A )

    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_A )

    @is_pt_flax_cross_test
    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase = config_inputs_dict.pop('''vision_config''' )
        _UpperCamelCase = config_inputs_dict.pop('''text_config''' )
        _UpperCamelCase = config_inputs_dict
        self.check_equivalence_pt_to_flax(_A , _A , _A )
        self.check_equivalence_flax_to_pt(_A , _A , _A )

    @slow
    def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = self.get_pretrained_model_and_inputs()
        _UpperCamelCase = model_a(**_A )
        _UpperCamelCase = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_A )
            _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
            _UpperCamelCase = model_a(**_A )
            _UpperCamelCase = after_outputs[0]
            _UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_A , 1e-5 )


@require_flax
class lowerCAmelCase_ ( UpperCamelCase_, unittest.TestCase ):
    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
        _UpperCamelCase = 13
        _UpperCamelCase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _UpperCamelCase = random_attention_mask([batch_size, 4] )
        _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] , _A : List[str] ):
        _UpperCamelCase = FlaxViTModel(_A )
        _UpperCamelCase = FlaxBertModel(_A )
        return vision_model, text_model

    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = FlaxViTModelTester(self )
        _UpperCamelCase = FlaxBertModelTester(self )
        _UpperCamelCase = vit_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = vision_config_and_inputs
        _UpperCamelCase = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class lowerCAmelCase_ ( UpperCamelCase_, unittest.TestCase ):
    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
        _UpperCamelCase = 13
        _UpperCamelCase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _UpperCamelCase = random_attention_mask([batch_size, 4] )
        _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs

    def UpperCamelCase_ ( self : Any , _A : Union[str, Any] , _A : Optional[Any] ):
        _UpperCamelCase = FlaxCLIPVisionModel(_A )
        _UpperCamelCase = FlaxBertModel(_A )
        return vision_model, text_model

    def UpperCamelCase_ ( self : List[str] ):
        _UpperCamelCase = FlaxCLIPVisionModelTester(self )
        _UpperCamelCase = FlaxBertModelTester(self )
        _UpperCamelCase = clip_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = bert_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = vision_config_and_inputs
        _UpperCamelCase = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    @slow
    def UpperCamelCase_ ( self : Tuple ):
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        _UpperCamelCase = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' )
        _UpperCamelCase = model(**_A )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        _UpperCamelCase = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3 ) )
718
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = (1 - _cos) / 2
    _UpperCamelCase = 1 - _cos
    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = (1 + _cos) / 2
    _UpperCamelCase = -1 - _cos
    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = _sin / 2
    _UpperCamelCase = 0
    _UpperCamelCase = -ba
    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 1 - alpha
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 + alpha
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)
    _UpperCamelCase = 1 + alpha * big_a
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha * big_a
    _UpperCamelCase = 1 + alpha / big_a
    _UpperCamelCase = -2 * _cos
    _UpperCamelCase = 1 - alpha / big_a
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)
    _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
    _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
    _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
    _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
    _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
    _UpperCamelCase = big_a * (pmc + aaa)
    _UpperCamelCase = 2 * big_a * mpc
    _UpperCamelCase = big_a * (pmc - aaa)
    _UpperCamelCase = ppmc + aaa
    _UpperCamelCase = -2 * pmpc
    _UpperCamelCase = ppmc - aaa
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt


def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
    _UpperCamelCase = tau * frequency / samplerate
    _UpperCamelCase = sin(__snake_case )
    _UpperCamelCase = cos(__snake_case )
    _UpperCamelCase = _sin / (2 * q_factor)
    _UpperCamelCase = 10 ** (gain_db / 40)
    _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
    _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
    _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
    _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
    _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
    _UpperCamelCase = big_a * (ppmc + aaa)
    _UpperCamelCase = -2 * big_a * pmpc
    _UpperCamelCase = big_a * (ppmc - aaa)
    _UpperCamelCase = pmc + aaa
    _UpperCamelCase = 2 * mpc
    _UpperCamelCase = pmc - aaa
    _UpperCamelCase = IIRFilter(2 )
    filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
    return filt
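# Usage sketch, assuming these are the biquad constructors from TheAlgorithms
# audio_filters package (make_lowpass, make_highpass, make_bandpass, make_allpass,
# make_peak, make_lowshelf, make_highshelf): make_lowpass(1000, 48000) would
# return an order-2 IIRFilter whose per-sample processing method filters a
# 48 kHz stream with a 1 kHz cutoff.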
71
0
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def _snake_case ( __snake_case , __snake_case ):
    _UpperCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )
    _UpperCamelCase = chkpt['''model''']
    # We have the base model one level deeper than the original XLM repository
    _UpperCamelCase = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            _UpperCamelCase = v
        else:
            _UpperCamelCase = v
    _UpperCamelCase = chkpt['''params''']
    _UpperCamelCase = {n: v for n, v in config.items() if not isinstance(UpperCamelCase__ , (torch.FloatTensor, numpy.ndarray) )}
    _UpperCamelCase = chkpt['''dico_word2id''']
    _UpperCamelCase = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
    # Save pytorch-model
    _UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    _UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    _UpperCamelCase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(UpperCamelCase__ , UpperCamelCase__ )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(UpperCamelCase__ , indent=2 ) + '''\n''' )
    print(f"""Save vocab file to {pytorch_config_dump_path}""" )
    with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(UpperCamelCase__ , indent=2 ) + '''\n''' )


if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    _lowerCAmelCase = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
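# Illustrative invocation (file name and paths are placeholders; the flags come
# from the argparse setup above):
#   python convert_xlm_checkpoint.py --xlm_checkpoint_path ./xlm_ckpt.pth \
#       --pytorch_dump_folder_path ./xlm-pytorch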
719
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class lowerCAmelCase_ ( __lowercase ):
    UpperCAmelCase = "gpt_neox"

    def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
        super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
        _UpperCamelCase = vocab_size
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = rotary_pct
        _UpperCamelCase = rotary_emb_base
        _UpperCamelCase = attention_dropout
        _UpperCamelCase = hidden_dropout
        _UpperCamelCase = classifier_dropout
        _UpperCamelCase = initializer_range
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = use_cache
        _UpperCamelCase = tie_word_embeddings
        _UpperCamelCase = use_parallel_residual
        _UpperCamelCase = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )

    def UpperCamelCase_ ( self : str ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
        _UpperCamelCase = self.rope_scaling.get('''type''' , _A )
        _UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
71
0
from __future__ import annotations


def _snake_case ( __snake_case ):
    _UpperCamelCase = str(lowerCamelCase__ )
    return n == n[::-1]


def _snake_case ( __snake_case = 1000000 ):
    _UpperCamelCase = 0
    for i in range(1 , lowerCamelCase__ ):
        if is_palindrome(lowerCamelCase__ ) and is_palindrome(bin(lowerCamelCase__ ).split('''b''' )[1] ):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
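# Worked example: 585 is counted by the loop above, since it reads the same
# backwards in decimal and its binary form 1001001001 is also a palindrome;
# 10 is not, because '1010' reversed is '0101'.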
720
from ..utils import DummyObject, requires_backends


class lowerCAmelCase_ ( metaclass=__lowercase ):
    UpperCAmelCase = ["keras_nlp"]

    def __init__( self : Any , *_A : Dict , **_A : List[str] ):
        requires_backends(self , ['''keras_nlp'''] )
71
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

__snake_case = logging.get_logger(__name__)

__snake_case = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class lowerCAmelCase_ ( snake_case__ ):
    UpperCAmelCase = "biogpt"

    def __init__( self : Union[str, Any] , _A : List[Any]=4_2384 , _A : int=1024 , _A : Optional[int]=24 , _A : List[str]=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : List[str]=0.1 , _A : List[Any]=0.1 , _A : Dict=1024 , _A : Optional[int]=0.02 , _A : Dict=1e-12 , _A : Tuple=True , _A : Any=True , _A : Tuple=0.0 , _A : Optional[int]=0.0 , _A : Any=1 , _A : Optional[Any]=0 , _A : str=2 , **_A : str , ):
        _UpperCamelCase = vocab_size
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = initializer_range
        _UpperCamelCase = layer_norm_eps
        _UpperCamelCase = scale_embedding
        _UpperCamelCase = use_cache
        _UpperCamelCase = layerdrop
        _UpperCamelCase = activation_dropout
        super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
721
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig

_lowerCAmelCase = logging.get_logger(__name__)

# General docstring
_lowerCAmelCase = "RegNetConfig"

# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]

# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"

_lowerCAmelCase = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
        super().__init__(**_A )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        _UpperCamelCase = tf.keras.layers.ConvaD(
            filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
        _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity

    def UpperCamelCase_ ( self : Any , _A : Any ):
        _UpperCamelCase = self.convolution(self.padding(_A ) )
        _UpperCamelCase = self.normalization(_A )
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
        super().__init__(**_A )
        _UpperCamelCase = config.num_channels
        _UpperCamelCase = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )

    def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
        _UpperCamelCase = shape_list(_A )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
        _UpperCamelCase = self.embedder(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
        super().__init__(**_A )
        _UpperCamelCase = tf.keras.layers.ConvaD(
            filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
        _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )

    def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
        return self.normalization(self.convolution(_A ) , training=_A )


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
        super().__init__(**_A )
        _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
        _UpperCamelCase = [
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]

    def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        _UpperCamelCase = self.pooler(_A )
        for layer_module in self.attention:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = hidden_state * pooled
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
        super().__init__(**_A )
        _UpperCamelCase = in_channels != out_channels or stride != 1
        _UpperCamelCase = max(1 , out_channels // config.groups_width )
        _UpperCamelCase = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        _UpperCamelCase = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
        ]
        _UpperCamelCase = ACTaFN[config.hidden_act]

    def UpperCamelCase_ ( self : Dict , _A : Tuple ):
        _UpperCamelCase = hidden_state
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = self.shortcut(_A )
        hidden_state += residual
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
        super().__init__(**_A )
        _UpperCamelCase = in_channels != out_channels or stride != 1
        _UpperCamelCase = max(1 , out_channels // config.groups_width )
        _UpperCamelCase = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        _UpperCamelCase = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
        ]
        _UpperCamelCase = ACTaFN[config.hidden_act]

    def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
        _UpperCamelCase = hidden_state
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        _UpperCamelCase = self.shortcut(_A )
        hidden_state += residual
        _UpperCamelCase = self.activation(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
        super().__init__(**_A )
        _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        _UpperCamelCase = [
            # downsampling is done in the first layer with stride of 2
            layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
            *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]

    def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
        for layer_module in self.layers:
            _UpperCamelCase = layer_module(_A )
        return hidden_state


class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
        super().__init__(**_A )
        _UpperCamelCase = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )

    def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
        _UpperCamelCase = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                _UpperCamelCase = hidden_states + (hidden_state,)
            _UpperCamelCase = stage_module(_A )
        if output_hidden_states:
            _UpperCamelCase = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )


@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
    UpperCAmelCase = RegNetConfig

    def __init__( self : int , _A : Tuple , **_A : int ):
        super().__init__(**_A )
        _UpperCamelCase = config
        _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
        _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
        _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )

    @unpack_inputs
    def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        _UpperCamelCase = self.embedder(_A , training=_A )
        _UpperCamelCase = self.encoder(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )
        _UpperCamelCase = encoder_outputs[0]
        _UpperCamelCase = self.pooler(_A )
        # Change to NCHW output format have uniformity in the modules
        _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
        _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )


class lowerCAmelCase_ ( __lowercase ):
    UpperCAmelCase = RegNetConfig
    UpperCAmelCase = "regnet"
    UpperCAmelCase = "pixel_values"

    @property
    def UpperCamelCase_ ( self : Tuple ):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}


_lowerCAmelCase = r"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"

_lowerCAmelCase = r"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    __lowercase,
)
class lowerCAmelCase_ ( __lowercase ):
    def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
        super().__init__(_A , *_A , **_A )
        _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        _UpperCamelCase = self.regnet(
            pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )


@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    __lowercase,
)
class lowerCAmelCase_ ( __lowercase, __lowercase ):
    def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
        super().__init__(_A , *_A , **_A )
        _UpperCamelCase = config.num_labels
        _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
        # classification head
        _UpperCamelCase = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
        _UpperCamelCase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
        _UpperCamelCase = self.regnet(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )
        _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
        _UpperCamelCase = self.classifier[0](_A )
        _UpperCamelCase = self.classifier[1](_A )
        _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
        if not return_dict:
            _UpperCamelCase = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
71
0
'''simple docstring'''
from __future__ import annotations

_lowerCAmelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowerCAmelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def _snake_case ( __snake_case ):
    _UpperCamelCase = []
    _UpperCamelCase = len(__snake_case )
    for i in range(__snake_case ):
        _UpperCamelCase = -1
        for j in range(i + 1 , __snake_case ):
            if arr[i] < arr[j]:
                _UpperCamelCase = arr[j]
                break
        result.append(__snake_case )
    return result


def _snake_case ( __snake_case ):
    _UpperCamelCase = []
    for i, outer in enumerate(__snake_case ):
        _UpperCamelCase = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                _UpperCamelCase = inner
                break
        result.append(__snake_case )
    return result


def _snake_case ( __snake_case ):
    _UpperCamelCase = len(__snake_case )
    _UpperCamelCase = []
    _UpperCamelCase = [-1] * arr_size

    for index in reversed(range(__snake_case ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            _UpperCamelCase = stack[-1]
        stack.append(arr[index] )
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    _lowerCAmelCase = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
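# Worked example for all three variants above: for arr = [2, 1, 5] the next
# greater element of 2 is 5, of 1 is 5, and 5 has none, giving [5, 5, -1].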
700
from sklearn.metrics import mean_squared_error

import datasets

_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"

_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"

_lowerCAmelCase = "\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric(\"mse\")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {'mse': 0.6123724356957945}\n\n    If you're using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mse': array([0.41666667, 1. ])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def UpperCamelCase_ ( self : Optional[int] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ] , )

    def UpperCamelCase_ ( self : Dict ):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''' ) ),
                "references": datasets.Sequence(datasets.Value('''float''' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('''float''' ),
                "references": datasets.Value('''float''' ),
            }

    def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ):
        _UpperCamelCase = mean_squared_error(
            _A , _A , sample_weight=_A , multioutput=_A , squared=_A )
        return {"mse": mse}
71
0
from __future__ import annotations


def _snake_case ( __snake_case , __snake_case = None , __snake_case = None ):
    if start is None:
        _UpperCamelCase = 0

    if end is None:
        _UpperCamelCase = len(__snake_case ) - 1

    if start >= end:
        return

    _UpperCamelCase = (start + end) // 2

    slowsort(__snake_case , __snake_case , __snake_case )
    slowsort(__snake_case , mid + 1 , __snake_case )

    if sequence[end] < sequence[mid]:
        _UpperCamelCase , _UpperCamelCase = sequence[mid], sequence[end]

    slowsort(__snake_case , __snake_case , end - 1 )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
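# Usage sketch (the recursive calls above assume the function is bound to the
# name slowsort; it sorts the list in place):
#   seq = [5, 2, 4]
#   slowsort(seq)  # seq is now [2, 4, 5]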
701
import os
import re
import shutil
import sys
import tempfile
import unittest

import black

_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class lowerCAmelCase_ ( unittest.TestCase ):
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        _UpperCamelCase = self.diffusers_dir
        shutil.copy(
            os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) ,
            os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )

    def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ):
        _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        _UpperCamelCase = black.format_str(_A , mode=_A )
        _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(_A , '''w''' , newline='''\n''' ) as f:
            f.write(_A )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=_A )
            with open(_A , '''r''' ) as f:
                self.assertTrue(f.read() , _A )

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(_A , _A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ,
            '''DDPMSchedulerOutput''' ,
            REFERENCE_CODE + '''\n''' , )

        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ,
            '''DDPMSchedulerOutput''' ,
            _A , )

        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' ,
            '''TestSchedulerOutput''' ,
            re.sub('''DDPM''' , '''Test''' , _A ) , )

        # Copy consistency with a really long name
        _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" ,
            F"""{long_class_name}SchedulerOutput""" ,
            re.sub('''Bert''' , _A , _A ) , )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' ,
            '''TestSchedulerOutput''' ,
            _A ,
            overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
from __future__ import annotations import math class lowerCAmelCase_ : def __init__( self : int , _A : int ): _UpperCamelCase = size # approximate the overall size of segment tree with given value _UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update _UpperCamelCase = [0 for i in range(0 , 4 * size )] _UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCamelCase_ ( self : str , _A : int ): return idx * 2 def UpperCamelCase_ ( self : Any , _A : int ): return idx * 2 + 1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ): if left_element == right_element: _UpperCamelCase = a[left_element - 1] else: _UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_A ) , _A , _A , _A ) self.build(self.right(_A ) , mid + 1 , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: _UpperCamelCase = val if left_element != right_element: _UpperCamelCase = val _UpperCamelCase = val _UpperCamelCase = True _UpperCamelCase = True return True _UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_A ) , _A , _A , _A , _A , _A ) self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) return True def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] _UpperCamelCase = (left_element + right_element) // 2 _UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A ) _UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A ) return max(_A , _A ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCAmelCase = 15 _lowerCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
71
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer _lowerCAmelCase = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast _lowerCAmelCase = TaTokenizerFast _lowerCAmelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys _lowerCAmelCase = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
703
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): @property def UpperCamelCase_ ( self : str ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = ort.SessionOptions() _UpperCamelCase = False return options def UpperCamelCase_ ( self : str ): _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) _UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' ) # using the PNDM scheduler by default _UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_A ) _UpperCamelCase = '''A red cat sitting on a park bench''' _UpperCamelCase = np.random.RandomState(0 ) _UpperCamelCase = pipe( prompt=_A , image=_A , mask_image=_A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_A , output_type='''np''' , ) _UpperCamelCase = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
704
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class lowerCAmelCase_ ( __lowercase ): def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ): super().__init__( _A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) _UpperCamelCase = field _UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths} _UpperCamelCase = Json( cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , ) def UpperCamelCase_ ( self : List[str] ): # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _UpperCamelCase = num_proc _UpperCamelCase = '''utf-8''' _UpperCamelCase = to_json_kwargs def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A ) _UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' ) _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) _UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) _UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer: _UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" ''' was passed. Please provide a local path instead.''' ) _UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs ) return written def UpperCamelCase_ ( self : Any , _A : Optional[Any] ): _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args _UpperCamelCase = query_table( table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , ) _UpperCamelCase = batch.to_pandas().to_json( path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ): _UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): _UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(_A ) else: _UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(_A ) return written
71
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase_ ( unittest.TestCase ): UpperCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] ): _UpperCamelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) _UpperCamelCase = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 ) _UpperCamelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def UpperCamelCase_ ( self : str , _A : Optional[Any] , _A : Optional[int] ): for example in examples: _UpperCamelCase = video_classifier(_A ) self.assertEqual( _A , [ {'''score''': ANY(_A ), '''label''': ANY(_A )}, {'''score''': ANY(_A ), '''label''': ANY(_A )}, ] , ) @require_torch def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' _UpperCamelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) _UpperCamelCase = pipeline( '''video-classification''' , model=_A , feature_extractor=_A , frame_sampling_rate=4 ) _UpperCamelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) _UpperCamelCase = video_classifier(_A , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) _UpperCamelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def UpperCamelCase_ ( self : List[Any] ): pass
705
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ ( enum.Enum ): UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 @add_end_docstrings(__lowercase ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Tuple , *_A : List[str] , **_A : str ): super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _UpperCamelCase = None if self.model.config.prefix is not None: _UpperCamelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _UpperCamelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params ) _UpperCamelCase = {**self._preprocess_params, **preprocess_params} _UpperCamelCase = {**self._forward_params, **forward_params} def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ): _UpperCamelCase = {} if prefix is not None: _UpperCamelCase = prefix if prefix: _UpperCamelCase = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" ''' [None, \'hole\']''' ) _UpperCamelCase = handle_long_generation preprocess_params.update(_A ) _UpperCamelCase = generate_kwargs _UpperCamelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _UpperCamelCase = ReturnType.TENSORS if return_type is not None: _UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[str] , _A : str , **_A : Any ): return super().__call__(_A , **_A ) def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ): _UpperCamelCase = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) _UpperCamelCase = prompt_text if handle_long_generation == "hole": _UpperCamelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_new_tokens'''] else: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _UpperCamelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the''' ''' model\'s max length''' ) _UpperCamelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ): _UpperCamelCase = model_inputs['''input_ids'''] _UpperCamelCase = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = 1 else: _UpperCamelCase = input_ids.shape[0] _UpperCamelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _UpperCamelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _UpperCamelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) _UpperCamelCase = generated_sequence.shape[0] if self.framework == "pt": _UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ): _UpperCamelCase = model_outputs['''generated_sequence'''][0] _UpperCamelCase = model_outputs['''input_ids'''] _UpperCamelCase = model_outputs['''prompt_text'''] _UpperCamelCase = generated_sequence.numpy().tolist() _UpperCamelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _UpperCamelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _UpperCamelCase = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _UpperCamelCase = 0 else: _UpperCamelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: _UpperCamelCase = prompt_text + text[prompt_length:] else: _UpperCamelCase = text[prompt_length:] _UpperCamelCase = {'''generated_text''': all_text} records.append(_A ) return records
71
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) # TODO Update this _lowerCAmelCase = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "esm" def __init__( self : str , _A : Optional[int]=None , _A : Tuple=None , _A : List[str]=None , _A : str=768 , _A : str=12 , _A : Optional[Any]=12 , _A : int=3072 , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Dict=1026 , _A : List[Any]=0.02 , _A : Dict=1e-12 , _A : Optional[Any]="absolute" , _A : Union[str, Any]=True , _A : Optional[Any]=None , _A : str=False , _A : List[Any]=False , _A : Dict=None , _A : Optional[Any]=None , **_A : Tuple , ): super().__init__(pad_token_id=_A , mask_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = position_embedding_type _UpperCamelCase = use_cache _UpperCamelCase = emb_layer_norm_before _UpperCamelCase = token_dropout _UpperCamelCase = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) _UpperCamelCase = EsmFoldConfig() elif isinstance(_A , _A ): _UpperCamelCase = EsmFoldConfig(**_A ) _UpperCamelCase = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) _UpperCamelCase = get_default_vocab_list() else: _UpperCamelCase = vocab_list else: _UpperCamelCase = None _UpperCamelCase = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _A ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = super().to_dict() if isinstance(self.esmfold_config , _A ): _UpperCamelCase = self.esmfold_config.to_dict() return output @dataclass class lowerCAmelCase_ : UpperCAmelCase = None UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 0 UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = 128 UpperCAmelCase = None def UpperCamelCase_ ( self : Dict ): if self.trunk is None: _UpperCamelCase = TrunkConfig() elif isinstance(self.trunk , _A ): _UpperCamelCase = TrunkConfig(**self.trunk ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = asdict(self ) _UpperCamelCase = self.trunk.to_dict() return output @dataclass class lowerCAmelCase_ : UpperCAmelCase = 48 UpperCAmelCase = 1024 UpperCAmelCase = 128 UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = False UpperCAmelCase = 4 UpperCAmelCase = 128 UpperCAmelCase = None def UpperCamelCase_ ( self : List[Any] ): if self.structure_module is None: _UpperCamelCase = StructureModuleConfig() elif isinstance(self.structure_module , _A ): _UpperCamelCase = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) _UpperCamelCase = self.sequence_state_dim // self.sequence_head_width _UpperCamelCase = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = asdict(self ) _UpperCamelCase = self.structure_module.to_dict() return output @dataclass class lowerCAmelCase_ : UpperCAmelCase = 384 UpperCAmelCase = 128 UpperCAmelCase = 16 UpperCAmelCase = 128 UpperCAmelCase = 12 UpperCAmelCase = 4 UpperCAmelCase = 8 UpperCAmelCase = 0.1 UpperCAmelCase = 8 UpperCAmelCase = 1 UpperCAmelCase = 2 UpperCAmelCase = 7 UpperCAmelCase = 10 UpperCAmelCase = 1e-8 UpperCAmelCase = 1e5 def UpperCamelCase_ ( self : Dict ): return asdict(self ) def _snake_case ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
706
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids _UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss _UpperCamelCase = -(labels.shape[-1] * loss.item()) _UpperCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
71
0
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( __lowercase, unittest.TestCase ): UpperCAmelCase = CTRLTokenizer UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : str ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] _UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) ) _UpperCamelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] _UpperCamelCase = {'''unk_token''': '''<unk>'''} _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) def UpperCamelCase_ ( self : int , **_A : List[Any] ): kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int , _A : int ): _UpperCamelCase = '''adapt react readapt apt''' _UpperCamelCase = '''adapt react readapt apt''' return input_text, output_text def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCamelCase = '''adapt react readapt apt''' _UpperCamelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() _UpperCamelCase = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) _UpperCamelCase = tokens + [tokenizer.unk_token] _UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
707
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) UpperCAmelCase = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) UpperCAmelCase = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. -1 means use all."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def _snake_case ( __snake_case , __snake_case , __snake_case ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) ) def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __snake_case ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__snake_case , __snake_case , __snake_case ): assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) ) _UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__snake_case , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _UpperCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__snake_case , __snake_case ): _UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__snake_case ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _UpperCamelCase = SeqaSeqDataset # Get datasets _UpperCamelCase = ( dataset_class( __snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _UpperCamelCase = ( dataset_class( __snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer _UpperCamelCase = ( build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None ) _UpperCamelCase = SeqaSeqTrainer( model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator( __snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , ) _UpperCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) _UpperCamelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _UpperCamelCase = train_result.metrics 
_UpperCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) _UpperCamelCase = data_args.n_val _UpperCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.do_predict: logger.info('''*** Predict ***''' ) _UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' ) _UpperCamelCase = test_output.metrics _UpperCamelCase = data_args.n_test if trainer.is_world_process_zero(): _UpperCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __snake_case , training_args.output_dir ) all_metrics.update(__snake_case ) if training_args.predict_with_generate: _UpperCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _UpperCamelCase = lmap(str.strip , __snake_case ) write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
71
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "funnel" UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : Optional[int] , _A : List[Any]=3_0522 , _A : Dict=[4, 4, 4] , _A : List[Any]=None , _A : Tuple=2 , _A : int=768 , _A : Dict=12 , _A : List[str]=64 , _A : Optional[int]=3072 , _A : List[Any]="gelu_new" , _A : List[str]=0.1 , _A : str=0.1 , _A : Any=0.0 , _A : List[str]=0.1 , _A : Optional[Any]=None , _A : Optional[Any]=1e-9 , _A : Optional[Any]="mean" , _A : List[str]="relative_shift" , _A : Optional[Any]=True , _A : Optional[int]=True , _A : Optional[int]=True , **_A : str , ): _UpperCamelCase = vocab_size _UpperCamelCase = block_sizes _UpperCamelCase = [1] * len(_A ) if block_repeats is None else block_repeats assert len(_A ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." _UpperCamelCase = num_decoder_layers _UpperCamelCase = d_model _UpperCamelCase = n_head _UpperCamelCase = d_head _UpperCamelCase = d_inner _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = initializer_range _UpperCamelCase = initializer_std _UpperCamelCase = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.""" _UpperCamelCase = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.""" _UpperCamelCase = attention_type _UpperCamelCase = separate_cls _UpperCamelCase = truncate_seq _UpperCamelCase = pool_q_only super().__init__(**_A ) @property def UpperCamelCase_ ( self : int ): return sum(self.block_sizes ) @num_hidden_layers.setter def UpperCamelCase_ ( self : List[Any] , _A : Optional[int] ): raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' ) @property def UpperCamelCase_ ( self : Tuple ): return len(self.block_sizes ) @num_blocks.setter def UpperCamelCase_ ( self : List[Any] , _A : Optional[int] ): raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
708
from __future__ import annotations import typing from collections import Counter def _snake_case ( __snake_case ): _UpperCamelCase = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(__snake_case , max_perimeter + 1 ): _UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(__snake_case ): _UpperCamelCase = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def _snake_case ( __snake_case = 1000 ): _UpperCamelCase = pythagorean_triple(__snake_case ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(f'Perimeter {solution()} has maximum solutions')
71
0
from __future__ import annotations from random import choice def _snake_case ( __snake_case ): return choice(__snake_case ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = random_pivot(__snake_case ) # partition based on pivot # linear time _UpperCamelCase = [e for e in lst if e < pivot] _UpperCamelCase = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(__snake_case ) == k - 1: return pivot # pivot is in elements bigger than k elif len(__snake_case ) < k - 1: return kth_number(__snake_case , k - len(__snake_case ) - 1 ) # pivot is in elements smaller than k else: return kth_number(__snake_case , __snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
709
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = (DPMSolverSDEScheduler,) UpperCAmelCase = 10 def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ): _UpperCamelCase = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_A ) return config def UpperCamelCase_ ( self : List[Any] ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCamelCase_ ( self : List[Any] ): for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCamelCase_ ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCamelCase_ ( self : Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.scheduler_classes[0] _UpperCamelCase = self.get_scheduler_config() _UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) _UpperCamelCase = self.dummy_model() _UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma _UpperCamelCase = sample.to(_A ) for t in scheduler.timesteps: _UpperCamelCase = scheduler.scale_model_input(_A , _A ) _UpperCamelCase = model(_A , _A ) _UpperCamelCase = scheduler.step(_A , _A , _A ) _UpperCamelCase = output.prev_sample _UpperCamelCase = torch.sum(torch.abs(_A ) ) _UpperCamelCase = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
71
0
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
710
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowerCAmelCase_ : @property def UpperCamelCase_ ( self : Optional[int] ): return self.get_dummy_input() @property def UpperCamelCase_ ( self : Dict ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ): _UpperCamelCase = 4 _UpperCamelCase = 32 _UpperCamelCase = (32, 32) _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = torch.device(_A ) _UpperCamelCase = (batch_size, num_channels) + sizes _UpperCamelCase = randn_tensor(_A , generator=_A , device=_A ) _UpperCamelCase = {'''hidden_states''': hidden_states} if include_temb: _UpperCamelCase = 128 _UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A ) if include_res_hidden_states_tuple: _UpperCamelCase = torch.manual_seed(1 ) _UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),) if include_encoder_hidden_states: _UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A ) if include_skip_sample: _UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A ) return dummy_input def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": _UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop('''out_channels''' ) _UpperCamelCase = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) unet_block.to(_A ) unet_block.eval() with torch.no_grad(): _UpperCamelCase = unet_block(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) _UpperCamelCase = output[0, -1, -3:, -3:] _UpperCamelCase = torch.tensor(_A ).to(_A ) assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' ) def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common() _UpperCamelCase = self.block_class(**_A ) model.to(_A ) model.train() _UpperCamelCase = model(**_A ) if isinstance(_A , _A ): _UpperCamelCase = output[0] _UpperCamelCase = torch.device(_A ) _UpperCamelCase = randn_tensor(output.shape , device=_A ) _UpperCamelCase = torch.nn.functional.mse_loss(_A , _A ) loss.backward()
71
0
def apply_table(inp, table):
    """Permute the characters of ``inp`` using a 1-indexed position table."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box ``s`` (outer bits = row, inner bits = column)."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (round keys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
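# Editor's usage sketch for the helpers above (values are illustrative, not
# from the original record): apply_table reads 1-indexed positions, and xor
# compares two bit strings position by position.
assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"
assert xor("1010", "0110") == "1100"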
711
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
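# Editor's usage sketch: 25 is 0b11001, so its highest set bit sits at
# position 5 (counting from 1); zero has no set bits.
assert get_highest_set_bit_position(25) == 5
assert get_highest_set_bit_position(0) == 0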
71
0
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
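# Editor's usage sketch: x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)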
712
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCAmelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): for attribute in key.split('''.''' ): _UpperCamelCase = getattr(__snake_case , __snake_case ) if weight_type is not None: _UpperCamelCase = getattr(__snake_case , __snake_case ).shape else: _UpperCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _UpperCamelCase = value elif weight_type == "weight_g": _UpperCamelCase = value elif weight_type == "weight_v": _UpperCamelCase = value elif weight_type == "bias": _UpperCamelCase = value else: _UpperCamelCase = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( __snake_case , __snake_case ): _UpperCamelCase = [] _UpperCamelCase = fairseq_model.state_dict() _UpperCamelCase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _UpperCamelCase = None for name, value in fairseq_dict.items(): _UpperCamelCase = False if "conv_layers" in name: load_conv_layer( __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , ) _UpperCamelCase = True elif name.split('''.''' )[0] == "proj": _UpperCamelCase = fairseq_model.proj _UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _UpperCamelCase = True if "*" in mapped_key: _UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2] _UpperCamelCase = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: _UpperCamelCase = '''weight_g''' elif "weight_v" in name: _UpperCamelCase = '''weight_v''' elif "bias" in name: _UpperCamelCase = '''bias''' elif "weight" in name: _UpperCamelCase = '''weight''' else: _UpperCamelCase = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = full_name.split('''conv_layers.''' )[-1] _UpperCamelCase = name.split('''.''' ) _UpperCamelCase = int(items[0] ) _UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _UpperCamelCase = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__snake_case ) def _snake_case ( __snake_case ): _UpperCamelCase , _UpperCamelCase = emb.weight.shape _UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _UpperCamelCase = emb.weight.data return lin_layer def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = [line.split(''' ''' )[0] for line in lines] _UpperCamelCase = len(__snake_case ) _UpperCamelCase = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): _UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case ) _UpperCamelCase = SpeechaTextaConfig.from_pretrained( __snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case ) _UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) _UpperCamelCase = model[0].eval() # set weights for wav2vec2 encoder _UpperCamelCase = WavaVecaModel(__snake_case ) _UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case ) _UpperCamelCase = SpeechaTextaForCausalLM(__snake_case ) _UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case ) # set output linear layer unexpected_keys.remove('''embed_out''' ) _UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) _UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case ) _UpperCamelCase = False # add projection layer _UpperCamelCase = nn.Parameter(projection_layer.weight ) _UpperCamelCase = nn.Parameter(projection_layer.bias ) _UpperCamelCase = create_vocab_dict(__snake_case ) with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp: json.dump(__snake_case , __snake_case ) _UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) ) tokenizer.save_pretrained(__snake_case ) _UpperCamelCase = hf_wavavec.config.to_dict() _UpperCamelCase = tokenizer.pad_token_id _UpperCamelCase = tokenizer.bos_token_id _UpperCamelCase = tokenizer.eos_token_id _UpperCamelCase = '''speech_to_text_2''' _UpperCamelCase = '''wav2vec2''' _UpperCamelCase = 
SpeechEncoderDecoderConfig.from_dict(__snake_case ) hf_wavavec.save_pretrained(__snake_case ) feature_extractor.save_pretrained(__snake_case ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") _lowerCAmelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
71
0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowerCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "The column name of the images in the files."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "A folder containing the training data."} ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "A folder containing the validation data."} ) UpperCAmelCase = field( default=0.1_5, metadata={"help": "Percent to split off of train for validation."} ) UpperCAmelCase = field( default=__lowercase, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) UpperCAmelCase = field( default=__lowercase, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = {} if self.train_dir is not None: _UpperCamelCase = self.train_dir if self.validation_dir is not None: _UpperCamelCase = self.validation_dir _UpperCamelCase = data_files if data_files else None @dataclass class lowerCAmelCase_ : UpperCAmelCase = field( default=__lowercase, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) UpperCAmelCase = field( default=__lowercase, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) UpperCAmelCase = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) UpperCAmelCase = field(default=__lowercase, metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase = field( default=__lowercase, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) UpperCAmelCase = field( default=0.7_5, metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) UpperCAmelCase = field( default=__lowercase, metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = field( default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _snake_case ( __snake_case ): _UpperCamelCase = torch.stack([example['''pixel_values'''] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mae''' , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. 
_UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _UpperCamelCase = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _UpperCamelCase = ds['''train'''].train_test_split(data_args.train_val_split ) _UpperCamelCase = split['''train'''] _UpperCamelCase = split['''test'''] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCamelCase = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: _UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _UpperCamelCase = ViTMAEConfig() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) # adapt config config.update( { '''mask_ratio''': model_args.mask_ratio, '''norm_pix_loss''': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _UpperCamelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: _UpperCamelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) _UpperCamelCase = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _UpperCamelCase = ds['''train'''].column_names else: _UpperCamelCase = ds['''validation'''].column_names if data_args.image_column_name is not None: _UpperCamelCase = data_args.image_column_name elif "image" in column_names: _UpperCamelCase = '''image''' elif "img" in column_names: _UpperCamelCase = '''img''' else: _UpperCamelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _UpperCamelCase = image_processor.size['''shortest_edge'''] else: _UpperCamelCase = (image_processor.size['''height'''], image_processor.size['''width''']) _UpperCamelCase = Compose( [ Lambda(lambda __snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def 
preprocess_images(__snake_case ): _UpperCamelCase = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: _UpperCamelCase = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: _UpperCamelCase = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _UpperCamelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _UpperCamelCase = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _UpperCamelCase = None if training_args.resume_from_checkpoint is not None: _UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCamelCase = last_checkpoint _UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCamelCase = trainer.evaluate() trainer.log_metrics('''eval''' , __snake_case ) trainer.save_metrics('''eval''' , __snake_case ) # Write model card and (optionally) push to hub _UpperCamelCase = { '''tasks''': '''masked-auto-encoding''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-auto-encoding'''], } if training_args.push_to_hub: trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
713
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase_ : def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = relative_attention _UpperCamelCase = position_biased_input _UpperCamelCase = pos_att_type _UpperCamelCase = scope def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCamelCase = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ): 
_UpperCamelCase = TFDebertaVaModel(config=_A ) _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _UpperCamelCase = [input_ids, input_mask] _UpperCamelCase = model(_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ): _UpperCamelCase = TFDebertaVaForMaskedLM(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ): _UpperCamelCase = self.num_labels _UpperCamelCase = TFDebertaVaForTokenClassification(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ): _UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A ) _UpperCamelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _UpperCamelCase = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self : Any ): _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = TFDebertaVaModelTester(self ) 
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 ) def UpperCamelCase_ ( self : Any ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_A ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase_ ( self : List[Any] ): pass @slow def UpperCamelCase_ ( self : int ): _UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(_A , attention_mask=_A )[0] _UpperCamelCase = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
71
0
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working.
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
714
def bfs(graph, source, sink, parent):
    # Return True if there is an augmenting path from source to sink.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
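# Editor's note: for the classic CLRS flow network above, the printed maximum
# flow should be 23 (assuming the capacity matrix was transcribed faithfully).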
71
0
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it still fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
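# Editor's usage sketch (illustrative values): with item weights [1, 2, 4, 5]
# and values [5, 4, 8, 6], capacity 5 is best filled by items 0 and 2 -> 13.
assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13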
715
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
716
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() # fmt: off _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _UpperCamelCase = { '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } _UpperCamelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCamelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_image_processor() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCamelCase_ ( 
self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = image_processor(_A , return_tensors='''np''' ) _UpperCamelCase = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = processor(text=_A ) _UpperCamelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def UpperCamelCase_ ( self : List[Any] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCamelCase = processor.batch_decode(_A ) _UpperCamelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCamelCase_ ( self : List[str] ): _UpperCamelCase = self.get_image_processor() _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) _UpperCamelCase = '''lower newer''' _UpperCamelCase = self.prepare_image_inputs() _UpperCamelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
71
0
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
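# Editor's usage sketch: md5_me takes bytes and returns the hex digest as
# bytes; the empty-message digest is the well-known MD5 constant.
assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"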
717
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python's built-in power operator:
print((a / b) % p == (a * b ** (p - 2)) % p)
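# Editor's check (illustrative): the helper agrees with Python's built-in pow.
assert binary_exponentiation(2, 10, p) == pow(2, 10, p)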
71
0
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
718
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
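# Usage sketch for the biquad builders above. Because the corpus renames every
# function to `_snake_case`, only the last definition survives at import time:
# the high-shelf builder taking (frequency, samplerate, gain_db, q_factor).
# IIRFilter.process() is an assumption, taken from the audio_filters package
# the file imports from.
shelf = _snake_case(440, 48_000, 6)  # +6 dB high shelf at 440 Hz, 48 kHz audio
filtered = [shelf.process(s) for s in (0.0, 1.0, 0.5, -0.5)]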
71
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCAmelCase = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
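# A rough sketch of the lazy-import machinery the init above relies on; this
# illustrates the pattern only, not transformers' actual _LazyModule code. The
# idea: attribute access on the module object triggers the real submodule
# import, so heavy backends load only when first used.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported object back to the submodule that defines it.
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr: str):
        if attr not in self._object_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(
            "." + self._object_to_module[attr], self.__name__
        )
        return getattr(module, attr)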
719
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = "gpt_neox" def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ): super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def UpperCamelCase_ ( self : str ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , _A ) _UpperCamelCase = self.rope_scaling.get('''factor''' , _A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0: raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
71
0
import collections import os import re from pathlib import Path _lowerCAmelCase = "src/transformers" # Matches is_xxx_available() _lowerCAmelCase = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _lowerCAmelCase = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowerCAmelCase = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available _lowerCAmelCase = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _lowerCAmelCase = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowerCAmelCase = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _lowerCAmelCase = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], _lowerCAmelCase = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _lowerCAmelCase = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _lowerCAmelCase = re.compile(r"^\s*try:") # Catches a line with else: _lowerCAmelCase = re.compile(r"^\s*else:") def _snake_case ( __snake_case ): if _re_test_backend.search(__snake_case ) is None: return None _UpperCamelCase = [b[0] for b in _re_backend.findall(__snake_case )] backends.sort() return "_and_".join(__snake_case ) def _snake_case ( __snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _UpperCamelCase = f.readlines() _UpperCamelCase = 0 while line_index < len(__snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__snake_case ): return None # First grab the objects without a specific backend in _import_structure _UpperCamelCase = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: _UpperCamelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__snake_case ): _UpperCamelCase = _re_one_line_import_struct.search(__snake_case ).groups()[0] _UpperCamelCase = re.findall(R'''\[([^\]]+)\]''' , __snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue _UpperCamelCase = _re_import_struct_key_value.search(__snake_case ) if single_line_import_search is not None: _UpperCamelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__snake_case ) > 0] objects.extend(__snake_case ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 _UpperCamelCase = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_UpperCamelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): _UpperCamelCase = lines[line_index] if _re_import_struct_add_one.search(__snake_case ) is not None: objects.append(_re_import_struct_add_one.search(__snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(__snake_case ) is not None: _UpperCamelCase = _re_import_struct_add_many.search(__snake_case ).groups()[0].split(''', ''' ) _UpperCamelCase = [obj[1:-1] for obj in imports if len(__snake_case ) > 0] objects.extend(__snake_case ) elif _re_between_brackets.search(__snake_case ) is not None: _UpperCamelCase = _re_between_brackets.search(__snake_case ).groups()[0].split(''', ''' ) _UpperCamelCase = [obj[1:-1] for obj in imports if len(__snake_case ) > 0] objects.extend(__snake_case ) elif _re_quote_object.search(__snake_case ) is not None: objects.append(_re_quote_object.search(__snake_case ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 _UpperCamelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _UpperCamelCase = [] while ( line_index < len(__snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): _UpperCamelCase = lines[line_index] _UpperCamelCase = _re_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 _UpperCamelCase = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_UpperCamelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): _UpperCamelCase = lines[line_index] _UpperCamelCase = _re_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 _UpperCamelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( __snake_case , __snake_case ): def find_duplicates(__snake_case ): return [k for k, v in collections.Counter(__snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _UpperCamelCase = [] for key in import_dict_objects.keys(): _UpperCamelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) _UpperCamelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _UpperCamelCase = '''base imports''' if key == '''none''' else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def _snake_case ( ): _UpperCamelCase = [] for root, _, files in os.walk(__snake_case ): if "__init__.py" in files: _UpperCamelCase = os.path.join(__snake_case , '''__init__.py''' ) _UpperCamelCase = parse_init(__snake_case ) if objects is not None: _UpperCamelCase = analyze_results(*__snake_case ) if len(__snake_case ) > 0: _UpperCamelCase = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(__snake_case ) ) if len(__snake_case ) > 0: raise ValueError('''\n\n'''.join(__snake_case ) ) def _snake_case ( ): _UpperCamelCase = [] for path, directories, files in os.walk(__snake_case ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(__snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__snake_case ) / folder).glob('''*.py''' ) ) ) == 0: continue _UpperCamelCase = str((Path(__snake_case ) / folder).relative_to(__snake_case ) ) _UpperCamelCase = short_path.replace(os.path.sep , '''.''' ) submodules.append(__snake_case ) for fname in files: if fname == "__init__.py": continue _UpperCamelCase = str((Path(__snake_case ) / fname).relative_to(__snake_case ) ) _UpperCamelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(__snake_case ) return submodules _lowerCAmelCase = [ 
"convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _snake_case ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import _UpperCamelCase = direct_transformers_import(__snake_case ) _UpperCamelCase = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' ) as f: _UpperCamelCase = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , __snake_case ) ) ) _UpperCamelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__snake_case ) > 0: _UpperCamelCase = '''\n'''.join(f"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' f"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
720
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=__lowercase ): UpperCAmelCase = ["keras_nlp"] def __init__( self : Any , *_A : Dict , **_A : List[str] ): requires_backends(self , ['''keras_nlp'''] )
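# Behaviour sketch for the placeholder above: with keras_nlp absent,
# construction fails loudly (requires_backends raises ImportError in
# transformers' implementation) instead of breaking `import transformers`.
try:
    lowerCAmelCase_()  # the dummy class defined above
except ImportError as err:
    print(err)  # the message names the missing "keras_nlp" backend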
71
0
import json import os import re import sys import urllib.request import requests from bs4 import BeautifulSoup __snake_case = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582" } def _snake_case ( __snake_case = "dhaka" , __snake_case = 5 ): _UpperCamelCase = min(__snake_case , 50 ) # Prevent abuse! _UpperCamelCase = { '''q''': query, '''tbm''': '''isch''', '''hl''': '''en''', '''ijn''': '''0''', } _UpperCamelCase = requests.get('''https://www.google.com/search''' , params=__snake_case , headers=__snake_case ) _UpperCamelCase = BeautifulSoup(html.text , '''html.parser''' ) _UpperCamelCase = ''''''.join( re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) ) _UpperCamelCase = json.dumps(__snake_case ) _UpperCamelCase = json.loads(__snake_case ) _UpperCamelCase = re.findall( R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __snake_case , ) if not matched_google_image_data: return 0 _UpperCamelCase = re.sub( R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__snake_case ) , ) _UpperCamelCase = re.findall( R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __snake_case , ) for index, fixed_full_res_image in enumerate(__snake_case ): if index >= max_images: return index _UpperCamelCase = bytes(__snake_case , '''ascii''' ).decode( '''unicode-escape''' ) _UpperCamelCase = bytes(__snake_case , '''ascii''' ).decode( '''unicode-escape''' ) _UpperCamelCase = urllib.request.build_opener() _UpperCamelCase = [ ( '''User-Agent''', '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''', ) ] urllib.request.install_opener(__snake_case ) _UpperCamelCase = f"""query_{query.replace(" " , "_" )}""" if not os.path.exists(__snake_case ): os.makedirs(__snake_case ) urllib.request.urlretrieve( # noqa: S310 __snake_case , f"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __snake_case = download_images_from_google_query(sys.argv[1]) print(f'{image_count} images were downloaded to disk.') except IndexError: print("Please provide a search term.") raise
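# The bytes(...).decode("unicode-escape") step above undoes the \uXXXX
# escaping Google embeds in its inline JSON. A minimal reproduction:
escaped = "https://example.com/\\u0041.jpg"  # contains a literal backslash-u sequence
decoded = bytes(escaped, "ascii").decode("unicode-escape")
print(decoded)  # https://example.com/A.jpg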
721
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _lowerCAmelCase = logging.get_logger(__name__) # General docstring _lowerCAmelCase = "RegNetConfig" # Base docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring _lowerCAmelCase = "facebook/regnet-y-040" _lowerCAmelCase = "tabby, tabby cat" _lowerCAmelCase = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ): super().__init__(**_A ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) _UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity def UpperCamelCase_ ( self : Any , _A : Any ): _UpperCamelCase = self.convolution(self.padding(_A ) ) _UpperCamelCase = self.normalization(_A ) _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ): super().__init__(**_A ) _UpperCamelCase = config.num_channels _UpperCamelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ): _UpperCamelCase = shape_list(_A )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) ) _UpperCamelCase = self.embedder(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.ConvaD( filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' ) _UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' ) def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ): return self.normalization(self.convolution(_A ) , training=_A ) class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict , _A : int , _A : int , **_A : Dict ): super().__init__(**_A ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) _UpperCamelCase = [ tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def UpperCamelCase_ ( self : List[str] , _A : List[Any] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCamelCase = self.pooler(_A ) for layer_module in self.attention: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = hidden_state * pooled return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Dict , _A : Tuple ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ): super().__init__(**_A ) _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width ) _UpperCamelCase = ( TFRegNetShortCut(_A , stride=_A , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) _UpperCamelCase = [ TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ), ] _UpperCamelCase = ACTaFN[config.hidden_act] def UpperCamelCase_ ( self : Tuple , _A : List[Any] ): _UpperCamelCase = hidden_state for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) _UpperCamelCase = self.shortcut(_A ) hidden_state += residual _UpperCamelCase = self.activation(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ): super().__init__(**_A ) _UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer _UpperCamelCase = [ # downsampling is done in the first layer with stride of 2 layer(_A , _A , _A , stride=_A , name='''layers.0''' ), *[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ): for layer_module in self.layers: _UpperCamelCase = layer_module(_A ) return hidden_state class lowerCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ): super().__init__(**_A ) _UpperCamelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) ) def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ): _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(_A ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not 
return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A ) @keras_serializable class lowerCAmelCase_ ( tf.keras.layers.Layer ): UpperCAmelCase = RegNetConfig def __init__( self : int , _A : Tuple , **_A : int ): super().__init__(**_A ) _UpperCamelCase = config _UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' ) _UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' ) _UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' ) @unpack_inputs def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(_A , training=_A ) _UpperCamelCase = self.encoder( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(_A ) # Change to NCHW output format to have uniformity in the modules _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) _UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase_ ( __lowercase ): UpperCAmelCase = RegNetConfig UpperCAmelCase = "regnet" UpperCAmelCase = "pixel_values" @property def UpperCamelCase_ ( self : Tuple ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} _lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" _lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", __lowercase, ) class lowerCAmelCase_ ( __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, ) class lowerCAmelCase_ ( __lowercase, __lowercase ): def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ): super().__init__(_A , *_A , **_A ) _UpperCamelCase = config.num_labels _UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' ) # classification head _UpperCamelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ): _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet( _A , output_hidden_states=_A , return_dict=_A , training=_A ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier[0](_A ) _UpperCamelCase = self.classifier[1](_A ) _UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
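# A hedged end-to-end sketch using the checkpoint named in the docstrings
# above. TFRegNetForImageClassification and AutoImageProcessor are the public
# transformers entry points; the image path is a placeholder you supply.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

image = Image.open("cat.png")  # any RGB image
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, num_labels)
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])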
71
0
'''simple docstring''' def _snake_case ( __snake_case = 1000 ): _UpperCamelCase , _UpperCamelCase = 1, 1 _UpperCamelCase = 2 while True: _UpperCamelCase = 0 _UpperCamelCase = fa + fa _UpperCamelCase , _UpperCamelCase = fa, f index += 1 for _ in str(f ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
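# A compact, de-obfuscated equivalent of the search above (Project Euler 25:
# index of the first Fibonacci number with n digits). The digit count must be
# taken of the running Fibonacci value, not of the argument n.
def first_fib_index_with_digits(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index

assert first_fib_index_with_digits(3) == 12  # F(12) = 144 is the first 3-digit term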
700
from sklearn.metrics import mean_squared_error import datasets _lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" _lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" _lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def UpperCamelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def UpperCamelCase_ ( self : Dict ): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ): _UpperCamelCase = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
71
0
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 - _cos) / 2 _UpperCamelCase = 1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = (1 + _cos) / 2 _UpperCamelCase = -1 - _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = _sin / 2 _UpperCamelCase = 0 _UpperCamelCase = -ba _UpperCamelCase = 1 + alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 1 - alpha _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 + alpha _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = 1 + alpha * big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha * big_a _UpperCamelCase = 1 + alpha / big_a _UpperCamelCase = -2 * _cos _UpperCamelCase = 1 - alpha / big_a _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (pmc + aaa) _UpperCamelCase = 2 * big_a * mpc _UpperCamelCase = big_a * (pmc - aaa) _UpperCamelCase = ppmc + aaa _UpperCamelCase = -2 * pmpc _UpperCamelCase = ppmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ): _UpperCamelCase = tau * frequency / samplerate _UpperCamelCase = 
sin(__snake_case ) _UpperCamelCase = cos(__snake_case ) _UpperCamelCase = _sin / (2 * q_factor) _UpperCamelCase = 10 ** (gain_db / 40) _UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCamelCase = 2 * sqrt(__snake_case ) * alpha _UpperCamelCase = big_a * (ppmc + aaa) _UpperCamelCase = -2 * big_a * pmpc _UpperCamelCase = big_a * (ppmc - aaa) _UpperCamelCase = pmc + aaa _UpperCamelCase = 2 * mpc _UpperCamelCase = pmc - aaa _UpperCamelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
701
import os import re import shutil import sys import tempfile import unittest import black _lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class lowerCAmelCase_ ( unittest.TestCase ): def UpperCamelCase_ ( self : Any ): _UpperCamelCase = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _UpperCamelCase = self.diffusers_dir shutil.copy( os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ): _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result _UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _UpperCamelCase = black.format_str(_A , mode=_A ) _UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(_A , '''w''' , newline='''\n''' ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , '''r''' ) as f: self.assertTrue(f.read() , _A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(_A , _A ) def UpperCamelCase_ ( self : Optional[Any] ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , ) # Copy consistency with a really long name _UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , 
F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
71
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase_ : def __init__( self : List[str] , _A : List[Any] , _A : List[Any]=13 , _A : List[Any]=30 , _A : Union[str, Any]=2 , _A : List[Any]=3 , _A : List[Any]=True , _A : Any=True , _A : Dict=32 , _A : Optional[Any]=2 , _A : Optional[Any]=4 , _A : List[str]=37 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=10 , _A : int=0.02 , _A : List[str]=3 , _A : Tuple=None , _A : Any=2 , ): _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = patch_size _UpperCamelCase = num_channels _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _UpperCamelCase = (image_size // patch_size) ** 2 _UpperCamelCase = num_patches + 2 def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCamelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Dict , _A : Dict ): _UpperCamelCase = TFDeiTModel(config=_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self : str , _A : int , _A : List[str] , _A : int ): _UpperCamelCase = TFDeiTForMaskedImageModeling(config=_A ) _UpperCamelCase = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images 
_UpperCamelCase = 1 _UpperCamelCase = TFDeiTForMaskedImageModeling(_A ) _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : Any , _A : Tuple ): _UpperCamelCase = self.type_sequence_label_size _UpperCamelCase = TFDeiTForImageClassification(_A ) _UpperCamelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCamelCase = 1 _UpperCamelCase = TFDeiTForImageClassification(_A ) _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCamelCase = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self : str ): _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ): UpperCAmelCase = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCAmelCase = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase_ ( self : Optional[Any] ): _UpperCamelCase = TFDeiTModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCamelCase_ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def UpperCamelCase_ ( self : Union[str, Any] ): pass def UpperCamelCase_ ( self : Tuple ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) ) def UpperCamelCase_ ( self : Optional[int] ): _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(_A ) _UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def UpperCamelCase_ ( self : Union[str, Any] ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCamelCase_ ( self : int ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def UpperCamelCase_ ( self : Dict ): _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def UpperCamelCase_ ( self : Any , _A : Union[str, Any] , _A : List[Any] , _A : 
Any=False ): _UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def UpperCamelCase_ ( self : int ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = TFDeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def _snake_case ( ): _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self : List[str] ): return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self : Any ): _UpperCamelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=_A , return_tensors='''tf''' ) # forward pass _UpperCamelCase = model(**_A ) # verify the logits _UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) _UpperCamelCase = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
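# The preprocessing path the slow integration test above depends on, isolated
# as a hedged sketch (same calls as the test; the fixture path is the repo's
# own COCO sample):
from PIL import Image
from transformers import DeiTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
inputs = processor(images=image, return_tensors="tf")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224) for this checkpoint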
702
from __future__ import annotations import math class lowerCAmelCase_ : def __init__( self : int , _A : int ): _UpperCamelCase = size # approximate the overall size of segment tree with given value _UpperCamelCase = [0 for i in range(0 , 4 * size )] # create array to store lazy update _UpperCamelCase = [0 for i in range(0 , 4 * size )] _UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update def UpperCamelCase_ ( self : str , _A : int ): return idx * 2 def UpperCamelCase_ ( self : Any , _A : int ): return idx * 2 + 1 def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ): if left_element == right_element: _UpperCamelCase = a[left_element - 1] else: _UpperCamelCase = (left_element + right_element) // 2 self.build(self.left(_A ) , _A , _A , _A ) self.build(self.right(_A ) , mid + 1 , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: _UpperCamelCase = val if left_element != right_element: _UpperCamelCase = val _UpperCamelCase = val _UpperCamelCase = True _UpperCamelCase = True return True _UpperCamelCase = (left_element + right_element) // 2 self.update(self.left(_A ) , _A , _A , _A , _A , _A ) self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A ) _UpperCamelCase = max( self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] ) return True def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ): if self.flag[idx] is True: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = False if left_element != right_element: _UpperCamelCase = self.lazy[idx] _UpperCamelCase = self.lazy[idx] _UpperCamelCase = True _UpperCamelCase = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] _UpperCamelCase = (left_element + right_element) // 2 _UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A ) _UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A ) return max(_A , _A ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowerCAmelCase = 15 _lowerCAmelCase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
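# A quick sanity check of lazy range-assignment plus range-max, mirroring the
# __main__ block above. The names SegmentTree/build/update/query come from
# that block; the class's own defs are obfuscated to UpperCamelCase_.
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
segt = SegmentTree(len(A))
segt.build(1, 1, len(A), A)
assert segt.query(1, 1, len(A), 4, 6) == 7        # max over positions 4..6 (7, 3, -5)
segt.update(1, 1, len(A), 1, 3, 111)              # lazily assign 111 to positions 1..3
assert segt.query(1, 1, len(A), 1, len(A)) == 111 # the assignment dominates the range max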
71
0
def _snake_case ( __snake_case , __snake_case ): return price * (1 + tax_rate) if __name__ == "__main__": print(f'{price_plus_tax(100, 0.25) = }') print(f'{price_plus_tax(125.50, 0.05) = }')
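# Worked values for the two demo lines above, binding the obfuscated def to
# the name its own print calls expect:
price_plus_tax = _snake_case
assert price_plus_tax(100, 0.25) == 125.0                    # 100 * 1.25
assert abs(price_plus_tax(125.50, 0.05) - 131.775) < 1e-9    # 125.50 * 1.05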
703
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
0
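The `__init__.py` above relies on transformers' `_LazyModule` to defer heavy imports until an attribute is actually used. A minimal self-contained sketch of the same idea (illustrative only; `LazyModule` below is a hypothetical stand-in, not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value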
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write the dataset as JSON to a binary file handle, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
71
0
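A round-trip sketch for the JSON reader/writer above, using the public `datasets` API rather than the internal classes (file name is an example; per the writer logic shown, `to_json` defaults to the "records" orient with one JSON object per line):

# Hypothetical round-trip through JSON Lines with the public `datasets` API.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("demo.jsonl")  # delegates to the JSON writer shown above
back = load_dataset("json", data_files="demo.jsonl", split="train")
assert back.column_names == ["text", "label"]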
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
705
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(
                    prefix=prefix, **self._forward_params
                )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            preprocess_params["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
71
0
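An illustrative call into the text-generation pipeline defined above (model name and generation settings are examples only):

# Hypothetical usage: the pipeline factory dispatches to TextGenerationPipeline.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Hello, my name is", max_new_tokens=10, num_return_sequences=1)
print(out[0]["generated_text"])  # prompt plus up to 10 newly generated tokens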
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
# (these comparisons hold because b divides a exactly, so a / b is an integer-valued float)
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
706
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
71
0
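A quick sketch of the classic application of `binary_exponentiation` above: a modular inverse via Fermat's little theorem (valid here because 701 is prime and gcd(10, 701) == 1):

# Modular inverse of 10 modulo the prime p, via a^(p-2) mod p.
p = 701
inv_10 = binary_exponentiation(10, p - 2, p)
assert (10 * inv_10) % p == 1
assert inv_10 == pow(10, p - 2, p)  # cross-check with Python's built-in three-argument pow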
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # using the heapq module the queue behaves like a priority queue.
    # heapq implements a min-priority queue, so -1 * len(v) turns it into a max-priority queue.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
707
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
71
0
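A small sanity check for `greedy_min_vertex_cover` above (hypothetical graph; the function deletes entries from the adjacency lists it is given, so it runs on a deep copy while the edges are snapshotted first):

import copy

graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}
edges = [(u, v) for u in graph for v in graph[u]]  # snapshot before the call mutates the lists
cover = greedy_min_vertex_cover(copy.deepcopy(graph))
assert all(u in cover or v in cover for u, v in edges)  # every edge touches a chosen vertex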
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
708
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
71
0
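A quick check of `pythagorean_triple` on a small bound (bound and expectations below are illustrative; the three perimeter-120 right triangles are (20, 48, 52), (24, 45, 51) and (30, 40, 50)):

counts = pythagorean_triple(150)
assert counts[120] == 3  # (20, 48, 52), (24, 45, 51), (30, 40, 50)
print(f"Perimeter {counts.most_common(1)[0][0]} has maximum solutions up to 150")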