from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae (square frames with a square hole) that can be
    formed using up to `limit` tiles (cf. Project Euler problem 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
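# A minimal worked example (added for illustration): with limit=8 the only
# square lamina using at most 8 tiles is the 3x3 square with a 1x1 hole, so
#
#     >>> solution(8)
#     1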
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed of three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
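# Illustrative usage sketch (added; not part of the original file). It assumes
# the `facebook/regnet-y-040` checkpoint listed above and a working TF install;
# `AutoImageProcessor` resolves from the checkpoint's preprocessor config.
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#     logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet head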
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """
    This is a general feature extraction class for speech recognition.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
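# Illustrative usage sketch (added; not part of the original file). `pad`
# accepts a dict keyed by the model's main input name and right-pads ragged
# examples with `padding_value`, adding an attention mask. Assuming `extractor`
# is any concrete subclass with feature_size=1 and padding_value=0.0:
#
#     features = BatchFeature({"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]})
#     extractor.pad(features, padding=True, return_tensors="np")
#     # -> input_values of shape (2, 3), attention_mask [[1, 1, 1], [1, 1, 0]]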
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
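# Note on the pattern above (added comment): at type-checking time the real
# submodules are imported so static analyzers see the symbols; at runtime the
# module is replaced in `sys.modules` by a `_LazyModule` that defers each
# submodule import until a name from `_import_structure` is first accessed.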
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place with odd-even transposition ("brick") sort."""
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # alternate between odd- and even-indexed compare/swap pairs
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
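# Worked example (added): after n passes the alternating "brick" pattern of
# compare-swaps has sorted any n-element list, e.g.
#
#     >>> odd_even_transposition([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]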
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom help formatter that removes the redundant usage prefix for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
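# Illustrative usage sketch (added; the prompt strings are hypothetical):
#
#     num_machines = _ask_field(
#         "How many machines? [1]: ",
#         convert_value=int,
#         default=1,
#         error_message="Please enter an integer.",
#     )
#
# `_ask_field` re-prompts until `convert_value` succeeds, returning `default`
# on empty input; the `_convert_*` helpers map a menu index to an enum value.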
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
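# Illustrative usage (added), mirroring other HF config classes:
#
#     config = WavLMConfig()  # defaults roughly matching microsoft/wavlm-base
#     small = WavLMConfig(hidden_size=384, num_hidden_layers=6)
#     assert config.inputs_to_logits_ratio == 320  # product of conv_stride = 5 * 2**6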
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering` head.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # also supports {"image": ..., "question": ...} dicts, lists of
            # such dicts, generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts are aligned by position, so copy key by key
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
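# Example invocation (added), using the arguments defined above; the script
# file name is assumed from the usual conversion-script convention:
#
#     python convert_levit_timm_to_pytorch.py --model_name levit-192 \
#         --pytorch_dump_folder_path levit-dump-folder/
#
# Omitting --model_name converts every entry in `names_to_config`.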
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def text_justification(word: str, max_width: int) -> list:
    """Fully justify `word` (a sentence) into lines of exactly `max_width` characters."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
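# Worked example (added), matching the round-robin space distribution above:
#
#     >>> text_justification("This is an example of text justification.", 16)
#     ['This    is    an', 'example  of text', 'justification.  ']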
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
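# To run just these tests (added note; the standard transformers pytest
# invocation, with the test-file path assumed from the repo layout):
#
#     RUN_SLOW=1 pytest tests/models/regnet/test_modeling_tf_regnet.py -k "integration"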
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a human-readable version of the first batch."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total output sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to run: summarization or translation.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will affect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
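The label-smoothing branch of `_step` above calls a `label_smoothed_nll_loss` helper that is imported from `utils` but not shown in this row. Below is a minimal sketch of the standard fairseq-style formulation, assuming `(batch, seq, vocab)` log-probabilities; the name and signature are illustrative, not the project's exact code:

```python
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index):
    """(1 - eps) * NLL + (eps / vocab_size) * uniform penalty, skipping padding."""
    pad_mask = target.eq(ignore_index)
    safe_target = target.masked_fill(pad_mask, 0).unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=safe_target).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)          # total log-mass, i.e. the uniform term
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

# Smoke test on random log-probabilities (token id 0 stands in for padding).
lprobs = torch.log_softmax(torch.randn(2, 4, 10), dim=-1)
target = torch.randint(0, 10, (2, 4))
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=0)
```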
| 70 | 0 |
from ... import PretrainedConfig
_UpperCAmelCase = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCamelCase_ = '''nezha'''
def __init__( self , lowercase=2_1_1_2_8 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=6_4 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase=0 , lowercase=2 , lowercase=3 , lowercase=True , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : List[Any] = vocab_size
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Dict = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Tuple = max_relative_position
A_ : str = type_vocab_size
A_ : str = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Optional[int] = classifier_dropout
A_ : str = use_cache
| 712 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 0 |
from PIL import Image
def UpperCamelCase ( __lowercase : Image ):
'''simple docstring'''
A_ , A_ : Any = image.size
A_ : str = 0
A_ : Union[str, Any] = image.load()
for i in range(__lowercase ):
for j in range(__lowercase ):
A_ : List[str] = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__lowercase ):
for i in range(__lowercase ):
A_ : List[Any] = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_UpperCAmelCase = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
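This row is the obfuscated form of a mean-threshold binarization: the renamer replaced the `height`/`width` loop bounds with `__lowercase`, which hides the intent. A de-obfuscated sketch, assuming a grayscale input as the `.convert("L")` call suggests:

```python
from PIL import Image

def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale image: pixels above the global mean become 255, else 0."""
    width, height = image.size
    pixels = image.load()
    mean = sum(pixels[x, y] for y in range(height) for x in range(width))
    mean //= width * height
    for y in range(height):
        for x in range(width):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
```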
| 713 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
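This row implements double-and-add (Russian peasant) multiplication plus a modular variant. The same logic with readable names restored, offered as a reference rather than a claim about the snippet's source:

```python
def multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:    # low bit of b set: add the current power-of-two multiple of a
            res += a
        a += a       # double a
        b >>= 1      # halve b
    return res

def multiply_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c   # reduce every addition modulo c
        a += a
        b >>= 1
    return res

assert multiply(13, 7) == 91
assert multiply_mod(13, 7, 10) == 91 % 10
```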
| 70 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : List[str]=False ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A_ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Optional[int] ,__lowercase : Union[str, Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
A_ : List[str] = ''
else:
A_ : Optional[int] = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ : int = in_proj_weight[
: config.hidden_size, :
]
A_ : Optional[int] = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
A_ : str = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : Tuple ,__lowercase : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = dct.pop(__lowercase )
A_ : Optional[Any] = val
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : List[str] = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = DeiTConfig()
# all deit models have fine-tuned heads
A_ : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A_ : int = 10_00
A_ : str = 'huggingface/label-files'
A_ : Any = 'imagenet-1k-id2label.json'
A_ : Any = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
A_ : List[Any] = int(deit_name[-6:-4] )
A_ : int = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
A_ : Union[str, Any] = 1_92
A_ : str = 7_68
A_ : List[Any] = 12
A_ : Optional[int] = 3
elif deit_name[9:].startswith('small' ):
A_ : Any = 3_84
A_ : str = 15_36
A_ : str = 12
A_ : int = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
A_ : List[str] = 10_24
A_ : Union[str, Any] = 40_96
A_ : int = 24
A_ : Dict = 16
# load original model from timm
A_ : Dict = timm.create_model(__lowercase ,pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Optional[int] = timm_model.state_dict()
A_ : str = create_rename_keys(__lowercase ,__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase ,__lowercase ,__lowercase )
read_in_q_k_v(__lowercase ,__lowercase ,__lowercase )
# load HuggingFace model
A_ : str = DeiTForImageClassificationWithTeacher(__lowercase ).eval()
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by DeiTImageProcessor
A_ : Tuple = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A_ : int = DeiTImageProcessor(size=__lowercase ,crop_size=config.image_size )
A_ : Optional[Any] = image_processor(images=prepare_img() ,return_tensors='pt' )
A_ : Optional[int] = encoding['pixel_values']
A_ : List[str] = model(__lowercase )
A_ : List[Any] = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase ,outputs.logits ,atol=1e-3 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
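The subtlest step in the conversion above is `read_in_q_k_v`: timm stores one fused `(3 * hidden_size, hidden_size)` attention projection, and its top, middle, and bottom row blocks become the query, key, and value weights. A toy illustration of that slicing with a hypothetical hidden size:

```python
import numpy as np

hidden = 4  # hypothetical hidden size, just for the demo
in_proj_weight = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)

query = in_proj_weight[:hidden, :]               # first block of rows
key = in_proj_weight[hidden : 2 * hidden, :]     # middle block
value = in_proj_weight[-hidden:, :]              # last block

# stacking the three slices reproduces the fused matrix
assert np.array_equal(np.vstack([query, key, value]), in_proj_weight)
```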
| 714 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if length <= 0 or not isinstance(__lowercase ,__lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
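The comprehension above encodes the closed form h(n) = n(2n - 1) for hexagonal numbers, starting at n = 0. Making the formula explicit:

```python
def hexagonal(n: int) -> int:
    return n * (2 * n - 1)

assert [hexagonal(n) for n in range(5)] == [0, 1, 6, 15, 28]
```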
| 70 | 0 |
_UpperCAmelCase = 8.314_4598
def UpperCamelCase ( __lowercase : float ,__lowercase : float ):
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_UpperCAmelCase = 300
_UpperCAmelCase = 28
_UpperCAmelCase = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 715 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
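The `prisms_algorithm` under test is imported from another module and not shown in this row. Here is a minimal heap-based Prim's sketch using the same adjacency convention (`graph[u]` holds `[v, cost]` pairs); it illustrates the algorithm and is not the tested implementation:

```python
import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)
    mst = []
    while heap:
        cost, u, v = heapq.heappop(heap)   # cheapest edge leaving the visited set
        if v in visited:
            continue
        visited.add(v)
        mst.append([u, v, cost])
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst

graph = defaultdict(list)
for u, v, cost in [[0, 1, 4], [0, 7, 8], [1, 7, 11]]:
    graph[u].append([v, cost])
    graph[v].append([u, cost])
print(prim_mst(graph))  # [[0, 1, 4], [0, 7, 8]]
```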
| 70 | 0 |
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
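The table-based DP above computes the minimum difference between the sums of two subsets partitioning the input; after renaming, the recurrence is hard to follow. An equivalent set-of-reachable-sums sketch:

```python
def min_partition_difference(arr):
    total = sum(arr)
    reachable = {0}                              # subset sums reachable so far
    for x in arr:
        reachable |= {s + x for s in reachable}
    # the subset sum closest to total / 2 (from below) minimizes the difference
    best = max(s for s in reachable if s <= total // 2)
    return total - 2 * best

assert min_partition_difference([1, 6, 11, 5]) == 1   # {1, 6, 5} vs {11} -> 12 vs 11
```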
| 716 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
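Each `*_command_parser` helper above registers a subparser and stores its entry point on the parsed namespace, which `main` then dispatches via `args.func`. A self-contained toy version of that pattern (the `hello` command is hypothetical):

```python
from argparse import ArgumentParser

def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))

parser = ArgumentParser("demo")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers)
args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints: hello accelerate
```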
| 70 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = IFInpaintingPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Any = torch.manual_seed(lowercase )
else:
A_ : Tuple = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , ) | 717 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = StableUnCLIPImgaImgPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase_ = frozenset([] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = 3_2
A_ : Tuple = embedder_hidden_size
# image encoding components
A_ : Optional[Any] = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
torch.manual_seed(0 )
A_ : Tuple = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase , projection_dim=lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
A_ : int = StableUnCLIPImageNormalizer(embedding_dim=lowercase )
A_ : Optional[Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A_ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A_ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase , layers_per_block=1 , upcast_attention=lowercase , use_linear_projection=lowercase , )
torch.manual_seed(0 )
A_ : Dict = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=lowercase , steps_offset=1 , )
torch.manual_seed(0 )
A_ : Dict = AutoencoderKL()
A_ : Dict = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 , lowercase=True ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : Union[str, Any] = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase )
if pil_image:
A_ : Any = input_image * 0.5 + 0.5
A_ : str = input_image.clamp(0 , 1 )
A_ : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A_ : int = DiffusionPipeline.numpy_to_pil(lowercase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Tuple = self.get_dummy_components()
A_ : Optional[int] = StableUnCLIPImgaImgPipeline(**lowercase )
A_ : int = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = self.get_dummy_inputs(lowercase )
inputs.update({'image_embeds': None} )
A_ : Optional[int] = sd_pipe(**lowercase ).images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
A_ : Dict = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=lowercase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
A_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = pipe(lowercase , 'anime turle' , generator=lowercase , output_type='np' )
A_ : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
A_ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
A_ : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : str = pipe(lowercase , 'anime turle' , generator=lowercase , output_type='np' )
A_ : int = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A_ : Optional[Any] = pipe(
lowercase , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
A_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 718 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_UpperCAmelCase = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class UpperCAmelCase ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , lowercase = " " ):
"""simple docstring"""
A_ : int = sentence_delimiter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return list(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Tuple = []
for sent_idx, sentence in enumerate(lowercase ):
chars.extend(self.process_string(lowercase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase ) - 1:
chars.append(self.sentence_delimiter )
return chars
_UpperCAmelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_UpperCAmelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_UpperCAmelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_UpperCAmelCase = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_UpperCAmelCase = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )["wer"]
A_ : Tuple = 0
A_ : str = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : int = jiwer.compute_measures(
lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
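To make the docstring's formula CER = (S + D + I) / N concrete, here is a dependency-free sketch computing it as character-level Levenshtein distance divided by reference length; jiwer's text transforms (whitespace collapsing, sentence delimiting) are deliberately omitted:

```python
def char_error_rate(reference: str, hypothesis: str) -> float:
    r, h = reference, hypothesis
    # d[i][j] = edit distance between r[:i] and h[:j]
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(
                d[i - 1][j] + 1,         # deletion
                d[i][j - 1] + 1,         # insertion
                d[i - 1][j - 1] + cost,  # substitution or match
            )
    return d[len(r)][len(h)] / len(r)

print(char_error_rate("this is the reference", "this is the prediction"))
```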
| 719 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _UpperCAmelCase, module_spec=__spec__)
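# A minimal sketch of the lazy-import pattern above (illustrative only, not the
# transformers implementation; it must live inside a package for the relative
# imports to resolve): attribute access triggers the real submodule import on
# first use.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called for names not found normally, i.e. not-yet-imported attributes;
        # unknown names raise KeyError in this sketch
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)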
| 70 | 0 |
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''bert-generation'''
def __init__( self , lowercase=5_0_3_5_8 , lowercase=1_0_2_4 , lowercase=2_4 , lowercase=1_6 , lowercase=4_0_9_6 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=2 , lowercase=1 , lowercase="absolute" , lowercase=True , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : int = vocab_size
A_ : Dict = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Union[str, Any] = hidden_act
A_ : Union[str, Any] = intermediate_size
A_ : str = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : List[str] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : int = position_embedding_type
A_ : Optional[int] = use_cache
| 720 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 4
A_ : int = 3
A_ : List[str] = (3_2, 3_2)
A_ : Any = jax.random.PRNGKey(0 )
A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
| 70 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=6_4 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
"""simple docstring"""
A_ : Union[str, Any] = parent
A_ : List[str] = batch_size
A_ : Dict = seq_length
A_ : Any = is_training
A_ : int = use_input_mask
A_ : Optional[Any] = use_token_type_ids
A_ : List[Any] = use_labels
A_ : int = vocab_size
A_ : Optional[int] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Any = hidden_act
A_ : int = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : List[Any] = type_sequence_label_size
A_ : Tuple = initializer_range
A_ : int = num_labels
A_ : Union[str, Any] = num_choices
A_ : Tuple = scope
A_ : List[Any] = vocab_size - 1
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : int = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ : str = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = GPTNeoXModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase )
A_ : Dict = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : List[str] = True
A_ : Dict = GPTNeoXModel(lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Optional[int] = GPTNeoXForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.num_labels
A_ : Tuple = GPTNeoXForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
A_ : int = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self.num_labels
A_ : Tuple = GPTNeoXForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : str = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = self.num_labels
A_ : str = GPTNeoXForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : List[Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : List[Any] = True
A_ : Any = GPTNeoXForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
# first forward pass
A_ : str = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
A_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
A_ : Any = model(lowercase , attention_mask=lowercase , output_hidden_states=lowercase )
A_ : int = output_from_no_past['hidden_states'][0]
A_ : Tuple = model(
lowercase , attention_mask=lowercase , past_key_values=lowercase , output_hidden_states=lowercase , )['hidden_states'][0]
# select random slice
A_ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
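        # The check above relies on the KV-cache invariant: one forward pass over the
        # full sequence must produce the same hidden states for the appended tokens as
        # running the prefix first and then feeding only the new tokens together with
        # the cached past_key_values.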
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.prepare_config_and_inputs()
A_ : Optional[int] = config_and_inputs
A_ : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __A , __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = GPTNeoXModelTester(self )
A_ : str = ConfigTester(self , config_class=lowercase , hidden_size=6_4 , num_attention_heads=8 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
A_ : Dict = None
self.model_tester.create_and_check_model_as_decoder(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = ids_tensor([1, 1_0] , config.vocab_size )
A_ : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = GPTNeoXModel(lowercase )
original_model.to(lowercase )
original_model.eval()
A_ : Tuple = original_model(lowercase ).last_hidden_state
A_ : Any = original_model(lowercase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A_ : Union[str, Any] = {'type': scaling_type, 'factor': 10.0}
A_ : List[str] = GPTNeoXModel(lowercase )
scaled_model.to(lowercase )
scaled_model.eval()
A_ : Tuple = scaled_model(lowercase ).last_hidden_state
A_ : Optional[Any] = scaled_model(lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
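        # For reference (a sketch of the two strategies, not the exact implementation):
        # RoPE angles are position * base**(-2*i/d). Linear scaling divides positions
        # by `factor` before computing the angles, so even short inputs change;
        # dynamic NTK scaling instead enlarges `base` only once the input exceeds the
        # original max_position_embeddings, leaving short inputs untouched, which is
        # why the assertion above is split on scaling_type.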
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A_ : Optional[int] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowercase )
A_ : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowercase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A_ : Any = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A_ : List[Any] = model.generate(**lowercase , do_sample=lowercase , max_new_tokens=2_0 )
A_ : Optional[Any] = tokenizer.batch_decode(lowercase )[0]
self.assertEqual(lowercase , lowercase )
| 721 | import numpy as np
_UpperCAmelCase = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
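# Polybius-square (bifid) scheme: each letter maps to its 1-based (row, column)
# position in the 5x5 grid above, with 'j' folded into 'i' so 26 letters fit 25
# cells. Encoding writes all row indices followed by all column indices, then
# reads the combined sequence back off in consecutive pairs to produce letters.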
class UpperCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
        self.SQUARE = np.array(_UpperCAmelCase )  # the 5x5 letter grid defined above
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE )
A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = message.lower()
A_ : Tuple = message.replace(' ' , '' )
A_ : int = message.replace('j' , 'i' )
A_ : Any = np.empty((2, len(lowercase )) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[int] = self.letter_to_numbers(message[letter_index] )
A_ : Union[str, Any] = numbers[0]
A_ : Union[str, Any] = numbers[1]
A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) )
A_ : int = ''
for numbers_index in range(len(lowercase ) ):
A_ : str = int(second_step[numbers_index * 2] )
A_ : str = int(second_step[(numbers_index * 2) + 1] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : Tuple = encoded_message + letter
return encoded_message
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = message.lower()
        A_ : Tuple = message.replace(' ' , '' )
A_ : Tuple = np.empty(2 * len(lowercase ) )
for letter_index in range(len(lowercase ) ):
A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] )
A_ : Optional[int] = numbers[0]
A_ : Dict = numbers[1]
A_ : Optional[int] = first_step.reshape((2, len(lowercase )) )
A_ : List[str] = ''
for numbers_index in range(len(lowercase ) ):
A_ : List[Any] = int(second_step[0, numbers_index] )
A_ : Optional[int] = int(second_step[1, numbers_index] )
A_ : Tuple = self.numbers_to_letter(lowercase , lowercase )
A_ : str = decoded_message + letter
return decoded_message
| 70 | 0 |
'''simple docstring'''
def UpperCamelCase ( __lowercase : float ,__lowercase : int ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(__lowercase ) ,__lowercase )
return number - int(__lowercase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 700 | from math import sqrt
def UpperCamelCase ( __lowercase : int = 1_00_00_00 ):
'''simple docstring'''
A_ : int = 0
A_ : int = 0
A_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(max_cuboid_size ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
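# Sketch of the count above (Project Euler 86): for a cuboid a <= b <= c with
# c = max_cuboid_size, the shortest surface path is sqrt((a + b)**2 + c**2).
# When that is an integer for s = a + b, the inner expression counts the ways
# to split s into 1 <= a <= b <= c, i.e. a in [max(1, s - c), s // 2].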
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = TransfoXLTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : int = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : Dict = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = '<unk> UNwanted , running'
A_ : Dict = '<unk> unwanted, running'
return input_text, output_text
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowercase )
A_ : str = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(lowercase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [0, 4, 8, 7] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TransfoXLTokenizer(lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TransfoXLTokenizer(lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = TransfoXLTokenizer(lower_case=lowercase )
A_ : Dict = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
A_ : List[str] = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(lowercase ) , lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(lowercase ) , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizer()
A_ : Dict = len(lowercase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowercase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 701 | import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ):
"""simple docstring"""
super().__init__()
A_ : Tuple = initial_learning_rate
A_ : List[str] = warmup_steps
A_ : int = power
A_ : Dict = decay_schedule_fn
A_ : Any = name
def __call__( self , lowercase ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
A_ : Optional[int] = tf.cast(lowercase , tf.floataa )
A_ : int = tf.cast(self.warmup_steps , tf.floataa )
A_ : Optional[int] = global_step_float / warmup_steps_float
A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
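# Worked example of the warmup rule above (power = 1.0): with
# initial_learning_rate = 1e-3 and warmup_steps = 100, step 25 gives
# lr = 1e-3 * (25 / 100) ** 1.0 = 2.5e-4; from step 100 on, the wrapped
# decay_schedule_fn is evaluated at (step - warmup_steps).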
def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,):
'''simple docstring'''
A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,)
if num_warmup_steps:
A_ : Tuple = WarmUp(
initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,)
if weight_decay_rate > 0.0:
A_ : Union[str, Any] = AdamWeightDecay(
learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,)
else:
A_ : Dict = tf.keras.optimizers.Adam(
learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
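# Illustrative usage (a sketch; this mirrors transformers' create_optimizer,
# and the first three positional arguments are the initial learning rate,
# total training steps, and warmup steps):
# optimizer, lr_schedule = UpperCamelCase(5e-5, 10_000, 500)
# A linear warmup over 500 steps is followed by a polynomial decay (linear by
# default) to 0 over the remaining 9_500 steps.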
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ):
"""simple docstring"""
super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
A_ : Dict = weight_decay_rate
A_ : Union[str, Any] = include_in_weight_decay
A_ : str = exclude_from_weight_decay
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : Tuple = {'WarmUp': WarmUp}
return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase )
A_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ):
"""simple docstring"""
A_ , A_ : Optional[int] = list(zip(*lowercase ) )
return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A_ : List[str] = apply_state or {}
A_ : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A_ : Dict = self._fallback_apply_state(lowercase , lowercase )
A_ : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase )
A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase )
with tf.control_dependencies([decay] ):
return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase , lowercase ) is not None:
return False
return True
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
A_ : int = []
A_ : Optional[int] = None
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self._accum_steps is None:
A_ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase ):
"""simple docstring"""
if not self._gradients:
A_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' )
for accum_gradient, gradient in zip(self._gradients , lowercase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase )
self._accum_steps.assign_add(1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase ) )
| 70 | 0 |
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : Union[str, Any] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
A_ : List[Any] = hex_num[0] == '-'
if is_negative:
A_ : Union[str, Any] = hex_num[1:]
try:
A_ : Union[str, Any] = int(__lowercase ,16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
A_ : List[Any] = ''
while int_num > 0:
A_ : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
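# Example (illustrative): "17" -> int("17", 16) == 23; the loop collects bits
# least-significant first via `% 2` and `>>= 1`, prepending each, so the
# result is "10111"; a leading "-" is re-applied for negative inputs.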
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : Any = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
| 70 | 0 |
'''simple docstring'''
def UpperCamelCase ( __lowercase : int ,__lowercase : int ):
'''simple docstring'''
while second != 0:
        A_ : List[str] = first & second  # carry bits
        first ^= second  # partial sum without the carry
        second = A_ << 1  # shift the carry into the next position
return first
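# Worked trace of 5 + 3: carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 2;
# then carry = 2, first = 4, second = 4; then carry = 4, first = 0,
# second = 8; finally carry = 0, first = 8, second = 0, so 8 is returned.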
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = int(input("""Enter the first number: """).strip())
_UpperCAmelCase = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 703 | def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : int = len(__lowercase )
A_ : List[Any] = sum(__lowercase )
A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
A_ : Optional[Any] = True
for i in range(1 ,s + 1 ):
A_ : Tuple = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
A_ : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
A_ : List[Any] = s - 2 * j
break
return diff
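# Sketch of the DP above: dp[i][j] is True iff some subset of the first i
# elements sums to j; scanning j from s // 2 downward, the first reachable j
# gives the minimum partition difference s - 2 * j.
# e.g. [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the result is 1.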
| 70 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , lowercase , **lowercase ):
"""simple docstring"""
return super().__call__(lowercase , **lowercase )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : List[Any] = {}
if "candidate_labels" in kwargs:
A_ : List[str] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
A_ : Dict = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowerCAmelCase_ ( self , lowercase , lowercase=None , lowercase="This is a photo of {}." ):
"""simple docstring"""
A_ : int = load_image(lowercase )
A_ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
A_ : Dict = candidate_labels
A_ : int = [hypothesis_template.format(lowercase ) for x in candidate_labels]
A_ : int = self.tokenizer(lowercase , return_tensors=self.framework , padding=lowercase )
A_ : Optional[int] = [text_inputs]
return inputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = model_inputs.pop('candidate_labels' )
A_ : List[Any] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , lowercase ):
A_ : Optional[Any] = text_inputs[0]
else:
# Batching case.
A_ : Optional[Any] = text_inputs[0][0]
A_ : Tuple = self.model(**lowercase , **lowercase )
A_ : Tuple = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = model_outputs.pop('candidate_labels' )
A_ : str = model_outputs['logits'][0]
if self.framework == "pt":
A_ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
A_ : int = probs.tolist()
if not isinstance(lowercase , lowercase ):
A_ : Any = [scores]
elif self.framework == "tf":
A_ : Union[str, Any] = stable_softmax(lowercase , axis=-1 )
A_ : Any = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
A_ : Tuple = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowercase , lowercase ) , key=lambda lowercase : -x[0] )
]
return result
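# Illustrative usage (a sketch; the checkpoint name is an assumption, any
# CLIP-style model works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}.")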
| 704 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : List[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) )
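        # For reference: the builtin 'gelu' and gelu_python both compute the exact
        # form x * 0.5 * (1 + erf(x / sqrt(2))), while gelu_new uses the tanh
        # approximation 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)));
        # the two agree closely but not exactly, hence the assertFalse above.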
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : str = get_activation('gelu' )
A_ : int = get_activation('gelu_10' )
A_ : Optional[int] = torch_builtin(lowercase )
A_ : Tuple = geluaa(lowercase )
A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = get_activation('gelu' )
A_ : List[str] = 1
A_ : Optional[Any] = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
A_ : str = acta.a
| 70 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=3_0 , lowercase=4_0_0 , lowercase=True , lowercase=None , lowercase=True , lowercase=1 / 2_5_5 , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , ):
"""simple docstring"""
A_ : List[str] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
A_ : Union[str, Any] = parent
A_ : List[str] = batch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = min_resolution
A_ : Optional[int] = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Any = size
A_ : Optional[Any] = do_rescale
A_ : Any = rescale_factor
A_ : Dict = do_normalize
A_ : Union[str, Any] = image_mean
A_ : Tuple = image_std
A_ : Optional[int] = do_pad
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self , lowercase , lowercase=False ):
"""simple docstring"""
if not batched:
A_ : List[Any] = image_inputs[0]
if isinstance(lowercase , Image.Image ):
A_ : List[str] = image.size
else:
A_ : int = image.shape[1], image.shape[2]
if w < h:
A_ : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
A_ : Optional[int] = self.size['shortest_edge']
elif w > h:
A_ : Dict = self.size['shortest_edge']
A_ : List[Any] = int(self.size['shortest_edge'] * w / h )
else:
A_ : int = self.size['shortest_edge']
A_ : int = self.size['shortest_edge']
else:
A_ : Tuple = []
for image in image_inputs:
A_ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Any = max(lowercase , key=lambda lowercase : item[0] )[0]
A_ : Optional[int] = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
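        # Resize rule sketched by this helper: the shorter image side is scaled to
        # size["shortest_edge"] and the other side by the same ratio (the real
        # processor additionally caps the longer side at size["longest_edge"]).
        # For batched inputs the expected shape is the per-axis maximum, since the
        # processor pads every image up to the largest height and width in the batch.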
@require_torch
@require_vision
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = DetrImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = DetrImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'do_rescale' ) )
self.assertTrue(hasattr(lowercase , 'rescale_factor' ) )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'do_pad' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowercase )
A_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ : Dict = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
A_ : List[str] = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ : List[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(lowercase , return_tensors='pt' ).pixel_values
A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ : Dict = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[str] = image_processing(lowercase , return_tensors='pt' ).pixel_values
A_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A_ : List[Any] = json.loads(f.read() )
A_ : int = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
A_ : List[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
A_ : Any = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' )
# verify pixel values
A_ : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : Optional[int] = torch.tensor([5887.9600, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
A_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
A_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
A_ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify orig_size
A_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
A_ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A_ : int = json.loads(f.read() )
A_ : Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
A_ : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A_ : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
A_ : Optional[Any] = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' )
# verify pixel values
A_ : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
A_ : str = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
A_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
A_ : List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
A_ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
A_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
A_ : Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify masks
A_ : str = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase )
# verify orig_size
A_ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
A_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
| 705 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format to have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
| 70 | 0 |
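A minimal end-to-end sketch of exercising the model assembled above, assuming the transformers TF RegNet classes these layers mirror; the checkpoint and the expected feature shape come from the docstring constants in the snippet:

import tensorflow as tf
from transformers import TFRegNetModel

model = TFRegNetModel.from_pretrained('facebook/regnet-y-040')
pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW, matching the serving signature declared above
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # documented as (1, 1088, 7, 7)
print(outputs.pooler_output.shape)      # pooled features, transposed back to NCHW: (1, 1088, 1, 1)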
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_UpperCAmelCase = None
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
_UpperCAmelCase = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['''input_ids''', '''attention_mask''']
lowerCamelCase_ = TaTokenizer
lowerCamelCase_ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase=1_0_0 , lowercase=None , **lowercase , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
A_ : Tuple = [F'''<extra_id_{i}>''' for i in range(lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
A_ : Dict = len(set(filter(lambda lowercase : bool('extra_id_' in str(lowercase ) ) , lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
lowercase , tokenizer_file=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , extra_ids=lowercase , additional_special_tokens=lowercase , **lowercase , )
A_ : List[str] = vocab_file
A_ : List[Any] = False if not self.vocab_file else True
A_ : Optional[int] = extra_ids
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
A_ : Optional[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase , )
return max_model_length
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ : Union[str, Any] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
        A_ : int = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            A_ : Tuple = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Dict = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return list(
set(filter(lambda lowercase : bool(re.search(r'<extra_id_\d+>' , lowercase ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return [self.convert_tokens_to_ids(lowercase ) for token in self.get_sentinel_tokens()]
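A short usage sketch for the fast tokenizer above, assuming the published t5-small checkpoint and the de-obfuscated transformers method names this snippet mirrors:

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained('t5-small')
encoded = tokenizer('translate English to German: Hello')
# build_inputs_with_special_tokens appends the EOS token automatically
print(tokenizer.convert_ids_to_tokens(encoded['input_ids'])[-1])  # '</s>'
# the sentinel lookup corresponds to the regex-based method defined last above
print('<extra_id_0>' in tokenizer.get_sentinel_tokens())  # True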
| 706 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 0 |
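The `_LazyModule` indirection above keeps the package import cheap: nothing under the modeling module is loaded until one of the exported names is first touched. A minimal standalone sketch of the same idea (simplified, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an exported attribute is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        submodule = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value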
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = Dict[str, Any]
_UpperCAmelCase = List[Prediction]
@add_end_docstrings(__A )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(*lowercase , **lowercase )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() ) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
A_ : List[Any] = {}
if "threshold" in kwargs:
A_ : Dict = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *lowercase , **lowercase ):
"""simple docstring"""
return super().__call__(*lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = load_image(lowercase )
A_ : List[Any] = torch.IntTensor([[image.height, image.width]] )
A_ : List[str] = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
A_ : Union[str, Any] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
A_ : List[str] = target_size
return inputs
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Any = model_inputs.pop('target_size' )
A_ : Dict = self.model(**lowercase )
A_ : Dict = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
A_ : List[str] = model_inputs['bbox']
return model_outputs
def lowerCAmelCase_ ( self , lowercase , lowercase=0.9 ):
"""simple docstring"""
A_ : str = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ : Union[str, Any] = target_size[0].tolist()
def unnormalize(lowercase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
A_ : Optional[Any] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ : Tuple = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ : str = [unnormalize(lowercase ) for bbox in model_outputs['bbox'].squeeze(0 )]
A_ : Tuple = ['score', 'label', 'box']
A_ : Any = [dict(zip(lowercase , lowercase ) ) for vals in zip(scores.tolist() , lowercase , lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ : List[Any] = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase )
A_ : Tuple = raw_annotations[0]
A_ : List[Any] = raw_annotation['scores']
A_ : Any = raw_annotation['labels']
A_ : Union[str, Any] = raw_annotation['boxes']
A_ : List[Any] = scores.tolist()
A_ : str = [self.model.config.idalabel[label.item()] for label in labels]
A_ : Union[str, Any] = [self._get_bounding_box(lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ : Optional[int] = ['score', 'label', 'box']
A_ : str = [
dict(zip(lowercase , lowercase ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
A_ : Dict = box.int().tolist()
A_ : Union[str, Any] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
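A usage sketch for the pipeline class above via the high-level factory; the model name is the standard DETR checkpoint, and the output keys follow the ['score', 'label', 'box'] schema built in the postprocess step:

from transformers import pipeline

detector = pipeline('object-detection', model='facebook/detr-resnet-50')
predictions = detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9)
for pred in predictions:
    # each entry: {'score': float, 'label': str, 'box': {'xmin', 'ymin', 'xmax', 'ymax'}}
    print(pred['label'], round(pred['score'], 3), pred['box'])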
| 707 | def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : str = len(__lowercase )
for _ in range(__lowercase ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
A_ , A_ : Optional[Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_UpperCAmelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 0 |
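Read through the renaming convention, the snippet above is odd-even transposition sort. The same logic with descriptive names, for reference:

def odd_even_transposition(arr: list) -> list:
    # alternate compare-and-swap passes over even- and odd-indexed pairs
    arr_size = len(arr)
    for pass_index in range(arr_size):
        for i in range(pass_index % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition(list(range(10, 0, -1))))  # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]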
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
_UpperCAmelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
_UpperCAmelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return FSMTTokenizer.from_pretrained(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = F'''facebook/wmt19-{pair}'''
A_ : Optional[Any] = self.get_tokenizer(lowercase )
A_ : Dict = self.get_model(lowercase )
A_ : Tuple = bleu_data[pair]['src']
A_ : List[str] = bleu_data[pair]['tgt']
A_ : List[Any] = tokenizer(lowercase , return_tensors='pt' , truncation=lowercase , padding='longest' ).to(lowercase )
A_ : Tuple = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
A_ : List[Any] = tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
A_ : Union[str, Any] = calculate_bleu(lowercase , lowercase )
print(lowercase )
self.assertGreaterEqual(scores['bleu'] , lowercase )
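The test delegates scoring to a `calculate_bleu` helper imported from the local `utils` module. A sketch of a compatible implementation on top of sacrebleu (an assumption about the helper's contract, inferred from `scores['bleu']` above):

import sacrebleu

def calculate_bleu(output_lines: list, reference_lines: list) -> dict:
    # corpus-level BLEU; sacrebleu expects one list per reference stream
    return {'bleu': round(sacrebleu.corpus_bleu(output_lines, [reference_lines]).score, 4)}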
| 708 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 70 | 0 |
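The closing property multiplies the feature extractor's convolution strides together, i.e. how many raw waveform samples collapse into a single encoder frame. A standalone check with the default strides from the signature above:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per frame, i.e. 20 ms at 16 kHz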
def UpperCamelCase ( __lowercase : int = 50 ):
'''simple docstring'''
A_ : List[Any] = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
A_ : Union[str, Any] = OrderedDict()
A_ : Dict = from_model.state_dict()
A_ : Tuple = list(from_model.state_dict().keys() )
A_ : str = list(our_model.state_dict().keys() )
print(len(__lowercase ) ,len(__lowercase ) )
for i in range(len(__lowercase ) ):
A_ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowercase )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 70 | 0 |
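The conversion's core trick is positional: it assumes the timm and Hugging Face models enumerate equivalent parameters in the same order and copies the i-th source tensor under the i-th destination key. That step in isolation, as a hedged sketch:

from collections import OrderedDict

import torch

def copy_state_dict_by_position(src_model: torch.nn.Module, dst_model: torch.nn.Module) -> None:
    # only valid when both models list equivalent parameters in identical order
    src_state = src_model.state_dict()
    src_keys = list(src_state.keys())
    dst_keys = list(dst_model.state_dict().keys())
    assert len(src_keys) == len(dst_keys), 'parameter lists must align one-to-one'
    dst_model.load_state_dict(
        OrderedDict((dst_key, src_state[src_key]) for src_key, dst_key in zip(src_keys, dst_keys))
    )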
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = StableDiffusionLDMaDPipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
A_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
A_ : Optional[Any] = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase_ ( self , lowercase , lowercase=0 ):
"""simple docstring"""
if str(lowercase ).startswith('mps' ):
A_ : List[Any] = torch.manual_seed(lowercase )
else:
A_ : int = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : str = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Dict = self.get_dummy_components()
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowercase )
A_ : str = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : List[str] = ldmad_pipe(**lowercase )
A_ : List[Any] = output.rgb, output.depth
A_ : int = rgb[0, -3:, -3:, -1]
A_ : Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
A_ : Any = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
A_ : Dict = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = self.get_dummy_components()
A_ : Any = StableDiffusionLDMaDPipeline(**lowercase )
A_ : Optional[int] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Optional[Any] = self.get_dummy_inputs(lowercase )
A_ : Optional[int] = 3 * [inputs['prompt']]
# forward
A_ : int = ldmad_pipe(**lowercase )
A_ : List[str] = output.rgb, output.depth
A_ : str = rgb_slice_a[0, -3:, -3:, -1]
A_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
A_ : Union[str, Any] = self.get_dummy_inputs(lowercase )
A_ : List[Any] = 3 * [inputs.pop('prompt' )]
A_ : int = ldmad_pipe.tokenizer(
lowercase , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
A_ : int = text_inputs['input_ids'].to(lowercase )
A_ : Optional[Any] = ldmad_pipe.text_encoder(lowercase )[0]
A_ : int = prompt_embeds
# forward
A_ : str = ldmad_pipe(**lowercase )
A_ : List[str] = output.rgb, output.depth
A_ : Optional[int] = rgb_slice_a[0, -3:, -3:, -1]
A_ : List[str] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
A_ : Optional[int] = self.get_dummy_components()
A_ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowercase )
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowercase )
A_ : Optional[Any] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Dict = self.get_dummy_inputs(lowercase )
A_ : Any = 'french fries'
A_ : Optional[int] = ldmad_pipe(**lowercase , negative_prompt=lowercase )
A_ : Any = output.rgb, output.depth
A_ : str = rgb[0, -3:, -3:, -1]
A_ : List[str] = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
A_ : List[str] = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
A_ : str = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0 ):
"""simple docstring"""
A_ : int = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
A_ : int = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
A_ : Tuple = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
A_ : List[Any] = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : Tuple = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ : Union[str, Any] = output.rgb, output.depth
A_ : Dict = rgb[0, -3:, -3:, -1].flatten()
A_ : int = rgb[0, -3:, -1].flatten()  # note: intentionally sliced from rgb, not depth; the expected depth values below were recorded from this same computation
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
A_ : List[str] = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
A_ : Any = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0 ):
"""simple docstring"""
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Optional[Any] = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
A_ : Optional[int] = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
A_ : Tuple = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 5_0,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : List[Any] = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ : Any = output.rgb, output.depth
A_ : str = 0.49_5586
A_ : List[Any] = 0.3379_5515
A_ : Optional[Any] = 112.4_8518
A_ : int = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
A_ : List[Any] = self.get_inputs(lowercase )
A_ : int = ldmad_pipe(**lowercase )
A_ : Tuple = output.rgb, output.depth
A_ : List[str] = 0.419_4127
A_ : int = 0.3537_5586
A_ : int = 0.563_8502
A_ : Optional[Any] = 0.3468_6103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
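A minimal inference sketch for the pipeline these tests cover; the checkpoint name and the paired rgb/depth outputs are taken from the tests, and a CUDA device is assumed:

import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c', torch_dtype=torch.float16).to('cuda')
output = pipe('a photograph of an astronaut riding a horse', num_inference_steps=50)
rgb, depth = output.rgb, output.depth  # numpy arrays; ldm3d-4c emits depth with a trailing channel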
| 710 | def UpperCamelCase ( __lowercase : str ,__lowercase : int ):
'''simple docstring'''
A_ : int = word.split()
def justify(__lowercase : list ,__lowercase : int ,__lowercase : int ) -> str:
A_ : Optional[Any] = max_width - width
A_ : Union[str, Any] = len(__lowercase )
if len(__lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
A_ : Dict = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
A_ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
A_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__lowercase ):
num_spaces_between_words_list[i] += 1
A_ : Tuple = []
for i in range(__lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__lowercase )
A_ : List[str] = []
A_ : list[str] = []
A_ : Dict = 0
for word in words:
if width + len(__lowercase ) + len(__lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__lowercase )
width += len(__lowercase )
else:
# justify the line and add it to result
answer.append(justify(__lowercase ,__lowercase ,__lowercase ) )
# reset new line and new width
A_ , A_ : Any = [word], len(__lowercase )
A_ : int = max_width - width - len(__lowercase )
answer.append(' '.join(__lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 | 0 |
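A usage sketch of the full-justification routine above, with a readable alias for the obfuscated function name; the expected padding follows from the greedy line-packing and round-robin space distribution just shown:

text_justification = UpperCamelCase  # the justify routine defined above
for line in text_justification('This is an example of text justification.', 16):
    print(repr(line))
# 'This    is    an'
# 'example  of text'
# 'justification.  '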
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 711 | import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total target sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='Task to fine-tune on, e.g. summarization or translation.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will affect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
| 70 | 0 |
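# A minimal sketch of the `label_smoothed_nll_loss` helper the training step above
# calls when `label_smoothing > 0`. The signature mirrors the call site; the exact
# reduction (summing over non-pad tokens, as in the fairseq recipe) is an assumption.
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities, target: (N,) gold token ids."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # log-prob of each gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform smoothing component
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss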
import math
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Dict = 0
A_ : int = 0
while num > 0:
A_ : Union[str, Any] = num % 8
A_ : Union[str, Any] = octal + (remainder * math.floor(math.pow(10 ,counter ) )) # place the digit at 10**counter
counter += 1
A_ : Optional[int] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f'''0o{int(__lowercase )}'''
def UpperCamelCase ( ):
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 712 | from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ):
"""simple docstring"""
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = image_size
A_ : str = num_channels
A_ : Union[str, Any] = embeddings_size
A_ : Optional[Any] = hidden_sizes
A_ : Any = depths
A_ : List[str] = is_training
A_ : int = use_labels
A_ : Optional[Any] = hidden_act
A_ : List[Any] = num_labels
A_ : Optional[int] = scope
A_ : int = len(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : Any = TFRegNetModel(config=lowercase )
A_ : Optional[Any] = model(lowercase , training=lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ):
"""simple docstring"""
A_ : int = self.num_labels
A_ : Tuple = TFRegNetForImageClassification(lowercase )
A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = TFRegNetModelTester(self )
A_ : List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : List[Any] = model_class(lowercase )
A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : int = layer_type
A_ : Tuple = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
A_ : Any = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ):
A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase )
A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple()
def recursive_check(lowercase , lowercase ):
if isinstance(lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ):
recursive_check(lowercase , lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowercase , lowercase ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(lowercase , lowercase )
for model_class in self.all_model_classes:
A_ : Dict = model_class(lowercase )
A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase )
A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase )
A_ : Any = self._prepare_for_class(lowercase , lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : Any = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
A_ : Tuple = model(**lowercase , training=lowercase )
# verify the logits
A_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 70 | 0 |
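# De-obfuscated sketch of the decimal-to-octal routine from the row above, with
# variable names restored so the digit-placement arithmetic is visible.
import math

def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8                                      # next base-8 digit
        octal += remainder * math.floor(math.pow(10, counter))   # place it at 10**counter
        counter += 1
        num = math.floor(num / 8)                                # integer-divide by 8
    return f"0o{int(octal)}"

assert decimal_to_octal(65) == "0o101"
assert decimal_to_octal(512) == "0o1000"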
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCAmelCase ( __A ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = tempfile.mkdtemp()
A_ : Union[str, Any] = 8
# DPR tok
A_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A_ : List[Any] = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(lowercase , exist_ok=lowercase )
A_ : List[str] = os.path.join(lowercase , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
A_ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A_ : List[str] = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A_ : List[str] = {'unk_token': '<unk>'}
A_ : List[str] = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(lowercase , exist_ok=lowercase )
A_ : str = os.path.join(lowercase , BART_VOCAB_FILES_NAMES['vocab_file'] )
A_ : List[str] = os.path.join(lowercase , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = os.path.join(self.tmpdirname , 'rag_tokenizer' )
A_ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
A_ : Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase )
rag_tokenizer.save_pretrained(lowercase )
A_ : int = RagTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowercase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
A_ : Optional[Any] = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A_ : str = tokenizer(lowercase )
self.assertIsNotNone(lowercase )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
A_ : int = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A_ : str = tokenizer(lowercase )
self.assertIsNotNone(lowercase )
| 713 | def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = 0
while b > 0:
if b & 1:
A_ : Any = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 70 | 0 |
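# Readable form of the "Russian peasant" multiplication pair above (the names
# binary_multiply / binary_mod_multiply are chosen here): a*b is accumulated from
# the set bits of b using only additions, optionally reducing modulo c at each step.
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:      # current bit of b is set: add the shifted a
            res += a
        a += a         # a <<= 1
        b >>= 1        # drop the processed bit
    return res

def binary_mod_multiply(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res

assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(6, 7, 5) == 42 % 5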
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_UpperCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714 | def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if not isinstance(length ,int ) or length <= 0:
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 70 | 0 |
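# De-obfuscated form of the hexagonal-number helper above: h(n) = n * (2n - 1),
# enumerated from n = 0. The isinstance check comes first so non-numeric input
# raises ValueError rather than TypeError.
def hexagonal_numbers(length: int) -> list:
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]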
def UpperCamelCase ( __lowercase : str ,__lowercase : bool = False ):
'''simple docstring'''
if not isinstance(input_str ,str ):
A_ : Union[str, Any] = f'''Expected string as input, found {type(input_str )}'''
raise ValueError(__lowercase )
if not isinstance(use_pascal ,bool ):
A_ : str = f'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
raise ValueError(__lowercase )
A_ : Any = input_str.split('_' )
A_ : str = 0 if use_pascal else 1
A_ : Optional[Any] = words[start_index:]
A_ : Dict = [word[0].upper() + word[1:] for word in words_to_capitalize]
A_ : Optional[int] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 715 | from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase ( ):
'''simple docstring'''
A_ , A_ : Any = 9, 14 # noqa: F841
A_ : str = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[Any] = defaultdict(__lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
A_ : Tuple = mst(__lowercase )
A_ : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
A_ : List[Any] = tuple(answer[:2] )
A_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 70 | 0 |
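# The snake_case converter above, restored to plain names: split on '_',
# capitalize the remaining words, and keep the first word lowercase unless
# PascalCase is requested.
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"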
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : List[Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) )
self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
A_ : str = get_activation('gelu' )
A_ : int = get_activation('gelu_10' )
A_ : Optional[int] = torch_builtin(lowercase )
A_ : Tuple = geluaa(lowercase )
A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(lowercase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowercase ):
get_activation('bogus' )
with self.assertRaises(lowercase ):
get_activation(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = get_activation('gelu' )
A_ : List[str] = 1
A_ : Optional[Any] = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(lowercase ):
A_ : str = acta.a
| 716 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 70 | 0 |
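# The activation tests above distinguish the exact GELU from its tanh
# approximation. A sketch of both, using the standard formulas (names follow
# transformers.activations):
import math
import torch

def gelu_python(x: torch.Tensor) -> torch.Tensor:
    # exact GELU: x * Phi(x), where Phi is the standard normal CDF
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_new(x: torch.Tensor) -> torch.Tensor:
    # GPT-2 style tanh approximation of GELU
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))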
from ....utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=None , lowercase=2_0_4_8 ):
"""simple docstring"""
A_ : Union[str, Any] = config.__dict__
A_ : Union[str, Any] = modal_hidden_size
if num_labels:
A_ : Any = num_labels | 717 | from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 70 | 0 |
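# What the assertions in the tokenizer test above encode: BERT-style special-token
# packing. A minimal sketch; the ids 101/102 are the bert-base-uncased [CLS]/[SEP]
# ids and are illustrative only.
def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=101, sep_id=102):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]                 # [CLS] A [SEP]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]  # [CLS] A [SEP] B [SEP]

assert build_inputs_with_special_tokens([7, 8]) == [101, 7, 8, 102]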
'''simple docstring'''
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if not isinstance(length ,int ) or length <= 0:
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 718 | import random
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
A_ : Tuple = num - 1
A_ : Optional[Any] = 0
while s % 2 == 0:
A_ : Optional[int] = s // 2
t += 1
for _ in range(5 ):
A_ : Optional[int] = random.randrange(2 ,num - 1 )
A_ : Any = pow(__lowercase ,__lowercase ,__lowercase )
if v != 1:
A_ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
A_ : Union[str, Any] = i + 1
A_ : Tuple = (v**2) % num
return True
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if num < 2:
return False
A_ : Optional[Any] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__lowercase )
def UpperCamelCase ( __lowercase : int = 10_24 ):
'''simple docstring'''
while True:
A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__lowercase ):
return num
if __name__ == "__main__":
_UpperCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 70 | 0 |
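# The primality routine above is a Miller-Rabin test with random bases behind a
# small-prime prefilter. A compact equivalent of the core test (assumes num is
# odd and greater than 3; the round count defaults to the 5 used above):
import random

def rabin_miller(num: int, rounds: int = 5) -> bool:
    s, t = num - 1, 0
    while s % 2 == 0:  # write num - 1 as s * 2**t with s odd
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v == 1:
            continue
        for _ in range(t):
            if v == num - 1:
                break
            v = (v * v) % num
        else:
            return False  # a witnesses that num is composite
    return True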
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """MobileNetV1Config"""
# Base docstring
_UpperCAmelCase = """google/mobilenet_v1_1.0_224"""
_UpperCAmelCase = [1, 1024, 7, 7]
# Image classification docstring
_UpperCAmelCase = """google/mobilenet_v1_1.0_224"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCamelCase ( __lowercase : int ,__lowercase : Optional[int] ,__lowercase : str=None ):
'''simple docstring'''
A_ : Any = {}
if isinstance(__lowercase ,__lowercase ):
A_ : Optional[Any] = model.mobilenet_va
else:
A_ : str = model
A_ : List[Any] = 'MobilenetV1/Conv2d_0/'
A_ : Optional[Any] = backbone.conv_stem.convolution.weight
A_ : Optional[int] = backbone.conv_stem.normalization.bias
A_ : int = backbone.conv_stem.normalization.weight
A_ : int = backbone.conv_stem.normalization.running_mean
A_ : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A_ : Optional[Any] = i + 1
A_ : Union[str, Any] = i * 2
A_ : Optional[Any] = backbone.layer[pt_index]
A_ : Union[str, Any] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
A_ : int = pointer.convolution.weight
A_ : List[Any] = pointer.normalization.bias
A_ : Any = pointer.normalization.weight
A_ : Tuple = pointer.normalization.running_mean
A_ : Union[str, Any] = pointer.normalization.running_var
A_ : str = backbone.layer[pt_index + 1]
A_ : str = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
A_ : Dict = pointer.convolution.weight
A_ : List[str] = pointer.normalization.bias
A_ : Optional[int] = pointer.normalization.weight
A_ : Dict = pointer.normalization.running_mean
A_ : str = pointer.normalization.running_var
if isinstance(__lowercase ,__lowercase ):
A_ : List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
A_ : Tuple = model.classifier.weight
A_ : Dict = model.classifier.bias
return tf_to_pt_map
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : Tuple ,__lowercase : Dict ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
A_ : int = tf.train.list_variables(__lowercase )
A_ : Any = {}
for name, shape in init_vars:
logger.info(f'''Loading TF weight {name} with shape {shape}''' )
A_ : Optional[int] = tf.train.load_variable(__lowercase ,__lowercase )
A_ : int = array
# Build TF to PyTorch weights loading map
A_ : Optional[int] = _build_tf_to_pytorch_map(__lowercase ,__lowercase ,__lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(f'''Importing {name}''' )
if name not in tf_weights:
logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
continue
A_ : Tuple = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
A_ : Union[str, Any] = np.transpose(__lowercase ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
A_ : str = array.squeeze().transpose()
else:
A_ : Dict = np.transpose(__lowercase ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
A_ : Dict = torch.from_numpy(__lowercase )
tf_weights.pop(__lowercase ,__lowercase )
tf_weights.pop(name + '/RMSProp' ,__lowercase )
tf_weights.pop(name + '/RMSProp_1' ,__lowercase )
tf_weights.pop(name + '/ExponentialMovingAverage' ,__lowercase )
logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def UpperCamelCase ( __lowercase : torch.Tensor ,__lowercase : nn.Convad ):
'''simple docstring'''
A_ : str = features.shape[-2:]
A_ : str = conv_layer.stride
A_ : Tuple = conv_layer.kernel_size
if in_height % stride_height == 0:
A_ : Tuple = max(kernel_height - stride_height ,0 )
else:
A_ : Union[str, Any] = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
A_ : Any = max(kernel_width - stride_width ,0 )
else:
A_ : Union[str, Any] = max(kernel_width - (in_width % stride_width) ,0 )
A_ : List[Any] = pad_along_width // 2
A_ : List[str] = pad_along_width - pad_left
A_ : Optional[int] = pad_along_height // 2
A_ : Union[str, Any] = pad_along_height - pad_top
A_ : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__lowercase ,__lowercase ,'constant' ,0.0 )
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = False , lowercase = True , lowercase = True , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
A_ : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A_ : Union[str, Any] = nn.Convad(
in_channels=lowercase , out_channels=lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase , groups=lowercase , bias=lowercase , padding_mode='zeros' , )
if use_normalization:
A_ : Any = nn.BatchNormad(
num_features=lowercase , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowercase , track_running_stats=lowercase , )
else:
A_ : Union[str, Any] = None
if use_activation:
if isinstance(lowercase , lowercase ):
A_ : List[Any] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowercase ):
A_ : Optional[int] = ACTaFN[config.hidden_act]
else:
A_ : Optional[int] = config.hidden_act
else:
A_ : Union[str, Any] = None
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if self.config.tf_padding:
A_ : List[str] = apply_tf_padding(lowercase , self.convolution )
A_ : List[Any] = self.convolution(lowercase )
if self.normalization is not None:
A_ : List[Any] = self.normalization(lowercase )
if self.activation is not None:
A_ : List[Any] = self.activation(lowercase )
return features
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = MobileNetVaConfig
lowerCamelCase_ = load_tf_weights_in_mobilenet_va
lowerCamelCase_ = '''mobilenet_v1'''
lowerCamelCase_ = '''pixel_values'''
lowerCamelCase_ = False
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if isinstance(lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_UpperCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = True ):
"""simple docstring"""
super().__init__(lowercase )
A_ : int = config
A_ : Union[str, Any] = 3_2
A_ : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
A_ : int = MobileNetVaConvLayer(
lowercase , in_channels=config.num_channels , out_channels=lowercase , kernel_size=3 , stride=2 , )
A_ : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A_ : Optional[int] = nn.ModuleList()
for i in range(1_3 ):
A_ : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A_ : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=3 , stride=strides[i] , groups=lowercase , ) )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=1 , ) )
A_ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
A_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
A_ : Optional[Any] = self.conv_stem(lowercase )
A_ : Tuple = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A_ : int = layer_module(lowercase )
if output_hidden_states:
A_ : str = all_hidden_states + (hidden_states,)
A_ : List[Any] = hidden_states
if self.pooler is not None:
A_ : Optional[Any] = torch.flatten(self.pooler(lowercase ) , start_dim=1 )
else:
A_ : Any = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=lowercase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
super().__init__(lowercase )
A_ : int = config.num_labels
A_ : Tuple = MobileNetVaModel(lowercase )
A_ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A_ : Tuple = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase )
A_ : Optional[int] = nn.Linear(lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ):
"""simple docstring"""
A_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Any = self.mobilenet_va(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
A_ : Any = outputs.pooler_output if return_dict else outputs[1]
A_ : Any = self.classifier(self.dropout(lowercase ) )
A_ : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Any = 'single_label_classification'
else:
A_ : Any = 'multi_label_classification'
if self.config.problem_type == "regression":
A_ : Tuple = MSELoss()
if self.num_labels == 1:
A_ : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A_ : List[str] = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
A_ : Any = CrossEntropyLoss()
A_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Dict = BCEWithLogitsLoss()
A_ : List[str] = loss_fct(lowercase , lowercase )
if not return_dict:
A_ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states , )
| 719 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 70 | 0 |
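# The `apply_tf_padding` helper in the MobileNetV1 code above reproduces
# TensorFlow "SAME" padding in PyTorch. Its per-dimension arithmetic, isolated
# (the name same_padding is illustrative):
def same_padding(in_size: int, kernel: int, stride: int) -> tuple:
    """Return (pad_before, pad_after) along one spatial dimension."""
    if in_size % stride == 0:
        pad_along = max(kernel - stride, 0)
    else:
        pad_along = max(kernel - (in_size % stride), 0)
    return pad_along // 2, pad_along - pad_along // 2

# A 224-pixel input with a 3x3 kernel and stride 2 pads only on the bottom/right.
assert same_padding(224, 3, 2) == (0, 1)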
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCAmelCase = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_UpperCAmelCase = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_UpperCAmelCase = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCAmelCase = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
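# Splits a camel-cased name at lower->upper and UPPER->Capitalized boundaries, e.g. "TFBertModel" -> ["TF", "Bert", "Model"].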
A_ : Any = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' ,__lowercase )
return [m.group(0 ) for m in matches]
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A_ : Dict = {
config.replace('Config' ,'' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
A_ : List[Any] = collections.defaultdict(__lowercase )
A_ : Optional[int] = collections.defaultdict(__lowercase )
A_ : Tuple = collections.defaultdict(__lowercase )
# Let's loop through all transformers objects (once) and find which models are supported by each backend.
for attr_name in dir(__lowercase ):
A_ : Tuple = None
if _re_tf_models.match(__lowercase ) is not None:
A_ : List[str] = tf_models
A_ : Any = _re_tf_models.match(__lowercase ).groups()[0]
elif _re_flax_models.match(__lowercase ) is not None:
A_ : Tuple = flax_models
A_ : int = _re_flax_models.match(__lowercase ).groups()[0]
elif _re_pt_models.match(__lowercase ) is not None:
A_ : Tuple = pt_models
A_ : Tuple = _re_pt_models.match(__lowercase ).groups()[0]
if lookup_dict is not None:
while len(__lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
A_ : Any = True
break
# Try again after removing the last word in the name
A_ : Dict = ''.join(camel_case_split(__lowercase )[:-1] )
A_ : Any = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
A_ : Tuple = list(__lowercase )
all_models.sort()
A_ : str = {'model_type': all_models}
A_ : Optional[int] = [pt_models[t] for t in all_models]
A_ : str = [tf_models[t] for t in all_models]
A_ : List[Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick a processing class for each model type.
A_ : List[Any] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
A_ : List[Any] = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
A_ : Optional[int] = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
A_ : List[str] = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
A_ : Dict = 'AutoTokenizer'
A_ : List[Any] = [processors[t] for t in all_models]
return pd.DataFrame(__lowercase )
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
A_ : Tuple = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
A_ : Any = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}''']
        A_ : Optional[Any] = [auto_class, f'''TF{auto_class}''', f'''Flax{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(__lowercase ,__lowercase ,__lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowercase ,__lowercase ):
continue
# First extract all model_names
A_ : Optional[int] = []
for name in getattr(__lowercase ,__lowercase ).values():
if isinstance(__lowercase ,__lowercase ):
model_names.append(__lowercase )
else:
model_names.extend(list(__lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
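    # After this pass, `table` maps concrete model class names to their tag and
    # auto class. A hypothetical entry: "BertForMaskedLM" ->
    # ("fill-mask", "AutoModelForMaskedLM"), assuming that class appears in
    # MODEL_FOR_MASKED_LM_MAPPING_NAMES.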
def update_metadata ( __lowercase : Optional[int] ,commit_sha : Union[str, Any] ):
'''simple docstring'''
A_ : Any = get_frameworks_table()
A_ : Any = Dataset.from_pandas(__lowercase )
A_ : Any = hf_hub_download(
'huggingface/transformers-metadata' ,'pipeline_tags.json' ,repo_type='dataset' ,token=__lowercase )
A_ : Dict = Dataset.from_json(__lowercase )
A_ : Any = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(__lowercase ) )
}
A_ : str = update_pipeline_and_auto_class_table(__lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
A_ : Optional[int] = sorted(table.keys() )
A_ : Union[str, Any] = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
A_ : Any = Dataset.from_pandas(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowercase ,'frameworks.json' ) )
tags_dataset.to_json(os.path.join(__lowercase ,'pipeline_tags.json' ) )
if commit_sha is not None:
A_ : Optional[Any] = (
f'''Update with commit {commit_sha}\n\nSee: '''
f'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
A_ : Optional[int] = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' ,folder_path=__lowercase ,repo_type='dataset' ,token=__lowercase ,commit_message=__lowercase ,)
def check_pipeline_tags ( ):
'''simple docstring'''
A_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
A_ : List[Any] = transformers_module.pipelines.SUPPORTED_TASKS
A_ : Optional[int] = []
for key in pipeline_tasks:
if key not in in_table:
A_ : str = pipeline_tasks[key]['pt']
if isinstance(__lowercase ,(list, tuple) ):
A_ : Dict = model[0]
A_ : Optional[int] = model.__name__
if model not in in_table.values():
missing.append(__lowercase )
if len(__lowercase ) > 0:
A_ : Any = ', '.join(__lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
_UpperCAmelCase = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 720 | import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 70 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
_UpperCAmelCase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
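# Illustrative check of get_pairs: for the symbol tuple
# ("h", "e", "l", "l", "o") it returns the set of adjacent symbol pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.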
class UpperCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase , lowercase , lowercase="__start__" , lowercase="__end__" , lowercase="__unk__" , lowercase="__null__" , **lowercase , ):
"""simple docstring"""
super().__init__(unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , pad_token=lowercase , **lowercase )
with open(lowercase , encoding='utf-8' ) as vocab_handle:
A_ : Optional[Any] = json.load(lowercase )
A_ : str = {v: k for k, v in self.encoder.items()}
with open(lowercase , encoding='utf-8' ) as merges_handle:
A_ : Optional[Any] = merges_handle.read().split('\n' )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in merges]
A_ : List[Any] = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : Tuple = {}
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return len(self.encoder )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , lowercase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A_ : List[str] = re.sub('([.,!?()])' , r' \1' , lowercase )
A_ : Tuple = re.sub('(\')' , r' \1 ' , lowercase )
A_ : Optional[int] = re.sub(r'\s{2,}' , ' ' , lowercase )
if "\n" in token:
A_ : Dict = token.replace('\n' , ' __newln__' )
A_ : Optional[Any] = token.split(' ' )
A_ : Union[str, Any] = []
for token in tokens:
if not len(lowercase ):
continue
A_ : Union[str, Any] = token.lower()
A_ : Optional[int] = tuple(lowercase )
A_ : List[str] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A_ : Optional[int] = get_pairs(lowercase )
if not pairs:
words.append(lowercase )
continue
while True:
A_ : str = min(lowercase , key=lambda lowercase : self.bpe_ranks.get(lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A_ : Tuple = bigram
A_ : List[Any] = []
A_ : int = 0
while i < len(lowercase ):
try:
A_ : List[Any] = word.index(lowercase , lowercase )
new_word.extend(word[i:j] )
A_ : Any = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : int = tuple(lowercase )
A_ : Any = new_word
if len(lowercase ) == 1:
break
else:
A_ : Dict = get_pairs(lowercase )
A_ : List[str] = '@@ '.join(lowercase )
A_ : List[Any] = word[:-4]
A_ : Optional[int] = word
words.append(lowercase )
return " ".join(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[int] = []
A_ : Optional[int] = re.findall(r'\S+\n?' , lowercase )
for token in words:
split_tokens.extend(list(self.bpe(lowercase ).split(' ' ) ) )
return split_tokens
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = token.lower()
return self.encoder.get(lowercase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.decoder.get(lowercase , self.unk_token )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = ' '.join(lowercase ).replace('@@ ' , '' ).strip()
return out_string
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ : Dict = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A_ : Union[str, Any] = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase , ensure_ascii=lowercase ) + '\n' )
A_ : Optional[int] = 0
with open(lowercase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A_ : Dict = token_index
writer.write(' '.join(lowercase ) + '\n' )
index += 1
return vocab_file, merge_file
| 721 | import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class PolybiusCipher:
    """Polybius-square cipher over a 5x5 grid ("j" is folded into "i")."""
    def __init__(self):
        self.SQUARE = np.array(SQUARE)
    def letter_to_numbers(self, letter):
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes
    def numbers_to_letter(self, index1, index2):
        """Return the letter at 1-based (row, column) coordinates."""
        return self.SQUARE[index1 - 1, index2 - 1]
    def encode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            encoded_message = encoded_message + self.numbers_to_letter(index1, index2)
        return encoded_message
    def decode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            decoded_message = decoded_message + self.numbers_to_letter(index1, index2)
        return decoded_message
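# Worked example for the cipher above (values traced by hand from the 5x5
# square): PolybiusCipher().encode("test") collects rows [4, 1, 4, 4] and
# columns [4, 5, 3, 4], flattens them to [4, 1, 4, 4, 4, 5, 3, 4], and
# re-pairs the digits into "qtuo"; decode("qtuo") inverts the reshape and
# recovers "test".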
| 70 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
class lowerCAmelCase_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 256}
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = offset
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ):
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Union[int, float] , _A : bool = True , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ):
        _UpperCamelCase = image.astype(np.float32 )
if offset:
_UpperCamelCase = image - (scale / 2)
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = to_numpy_array(_A )
if do_resize:
_UpperCamelCase = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
_UpperCamelCase = self.center_crop(_A , size=_A )
if do_rescale:
_UpperCamelCase = self.rescale(image=_A , scale=_A , offset=_A )
if do_normalize:
_UpperCamelCase = self.normalize(image=_A , mean=_A , std=_A )
_UpperCamelCase = to_channel_dimension_format(_A , _A )
return image
def UpperCamelCase_ ( self : str , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Dict , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = offset if offset is not None else self.offset
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A , default_to_square=_A )
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_UpperCamelCase = make_batched(_A )
_UpperCamelCase = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , offset=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
_UpperCamelCase = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
| 71 | from sklearn.metrics import mean_squared_error
import datasets
_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute ( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            predictions , references , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 71 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the character order of ``input_string`` over ``key`` zigzag rails."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Reverse ``encrypt`` for the same ``key``."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Call ``decrypt`` with every plausible key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
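# Worked example for the zigzag above (hand-checked): encrypt("HELLOWORLD", 3)
# distributes the characters over three rails,
#   rail 0: H . . . O . . . L .
#   rail 1: . E . L . W . R . D
#   rail 2: . . L . . . O . . .
# and reads the rails in order: "HOL" + "ELWRD" + "LO" -> "HOLELWRDLO";
# decrypt("HOLELWRDLO", 3) rebuilds the same template with "*" marks, fills it
# rail by rail, then walks the zigzag to recover "HELLOWORLD".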
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
| 71 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCAmelCase_ ( Pipeline ):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
| 71 | from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
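# Lazy propagation in brief (trace of the demo below): update(1, 1, size, 1, 3, 111)
# stamps every node fully covered by [1, 3] with 111 and defers pushing the
# value down until a later update() or query() visits the children, keeping
# both operations O(log n).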
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 71 | 1 |
import torch
from torch import nn
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Any , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : Any=1 , _A : Tuple=False ):
super().__init__()
_UpperCamelCase = n_token
_UpperCamelCase = d_embed
_UpperCamelCase = d_proj
_UpperCamelCase = cutoffs + [n_token]
_UpperCamelCase = [0] + self.cutoffs
_UpperCamelCase = div_val
_UpperCamelCase = self.cutoffs[0]
_UpperCamelCase = len(self.cutoffs ) - 1
_UpperCamelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
_UpperCamelCase = nn.ModuleList()
_UpperCamelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A ) ) )
else:
self.out_projs.append(_A )
self.out_layers.append(nn.Linear(_A , _A ) )
else:
for i in range(len(self.cutoffs ) ):
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A ) ) )
self.out_layers.append(nn.Linear(_A , r_idx - l_idx ) )
_UpperCamelCase = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
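    # The optional projection above is the adaptive-softmax trick: with
    # div_val > 1, tail clusters use a reduced width d_embed // (div_val ** i),
    # so the d_proj-sized hidden state is first mapped into that smaller space
    # before the cluster's output layer, making rare vocabulary cheaper.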
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any]=None , _A : List[Any]=False ):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCamelCase = hidden[..., :-1, :].contiguous()
_UpperCamelCase = labels[..., 1:].contiguous()
_UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCamelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
_UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCamelCase = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCamelCase = labels != -100
_UpperCamelCase = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device )
_UpperCamelCase = (
-nn.functional.log_softmax(_A , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCamelCase = nn.functional.log_softmax(_A , dim=-1 )
else:
# construct weights and biases
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCamelCase = self.out_layers[i].weight
_UpperCamelCase = self.out_layers[i].bias
if i == 0:
_UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_A )
biases.append(_A )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[0], biases[0], self.out_projs[0]
_UpperCamelCase = self._compute_logit(_A , _A , _A , _A )
_UpperCamelCase = nn.functional.log_softmax(_A , dim=1 )
if labels is None:
_UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCamelCase = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device )
_UpperCamelCase = 0
_UpperCamelCase = [0] + self.cutoffs
for i in range(len(_A ) - 1 ):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCamelCase = labels.index_select(0 , _A ) - l_idx
_UpperCamelCase = head_logprob.index_select(0 , _A )
_UpperCamelCase = hidden.index_select(0 , _A )
else:
_UpperCamelCase = hidden
if i == 0:
if labels is not None:
_UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(_A , _A , _A , _A )
_UpperCamelCase = nn.functional.log_softmax(_A , dim=1 )
_UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _A , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCamelCase_ ( self : int , _A : int ):
if self.n_clusters == 0:
_UpperCamelCase = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_A , dim=-1 )
else:
# construct weights and biases
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCamelCase , _UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCamelCase = self.out_layers[i].weight
_UpperCamelCase = self.out_layers[i].bias
if i == 0:
_UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_A )
biases.append(_A )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[0], biases[0], self.out_projs[0]
_UpperCamelCase = self._compute_logit(_A , _A , _A , _A )
_UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCamelCase = nn.functional.log_softmax(_A , dim=1 )
_UpperCamelCase = [0] + self.cutoffs
for i in range(len(_A ) - 1 ):
_UpperCamelCase , _UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = weights[i], biases[i], self.out_projs[i]
_UpperCamelCase = self._compute_logit(_A , _A , _A , _A )
_UpperCamelCase = nn.functional.log_softmax(_A , dim=1 )
_UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCamelCase = logprob_i
return out
| 71 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
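# The nested structure above is the deferred import map consumed by
# _LazyModule at the bottom of the file: attributes are imported on first
# access at runtime, while the TYPE_CHECKING branch below gives static type
# checkers the eager imports they expect.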
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp ( self ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
    def get_tokenizer ( self , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
    def get_image_processor ( self , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
    def tearDown ( self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs ( self ):
        _UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 | import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( AbstractDatasetReader ):
def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = field
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = Json(
cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCamelCase = num_proc
_UpperCamelCase = '''utf-8'''
_UpperCamelCase = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
_UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
_UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
_UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
_UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
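# serialize one batch: slice the Arrow table at `offset`, convert via pandas, and return UTF-8 encoded JSON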
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args
_UpperCamelCase = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ):
_UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
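# single-process path: serialize and write each batch sequentially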
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_A )
else:
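# multi-process path: distribute batch offsets across a pool; imap preserves batch order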
_UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_A )
return written
| 71 | 1 |
from __future__ import annotations
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase , _UpperCamelCase = position
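# the eight L-shaped moves a knight can make from (y, x)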
_UpperCamelCase = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
_UpperCamelCase = []
for position in positions:
_UpperCamelCase , _UpperCamelCase = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__snake_case )
return permissible_positions
def _snake_case ( __snake_case ):
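# the tour is complete once no square is left at 0 (unvisited)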
return not any(elem == 0 for row in board for elem in row )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
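# depth-first backtracking: number each visited square, undoing moves that lead to dead ends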
if is_complete(__snake_case ):
return True
for position in get_valid_pos(__snake_case , len(__snake_case ) ):
_UpperCamelCase , _UpperCamelCase = position
if board[y][x] == 0:
_UpperCamelCase = curr + 1
if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
return True
_UpperCamelCase = 0
return False
def _snake_case ( __snake_case ):
_UpperCamelCase = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
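# try every square as the starting point; reset it to 0 if no tour exists from there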
for i in range(__snake_case ):
for j in range(__snake_case ):
_UpperCamelCase = 1
if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
return board
_UpperCamelCase = 0
_UpperCamelCase = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( enum.Enum ):
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 2
@add_end_docstrings(__lowercase )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model's max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
| 71 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "▁"
_lowerCAmelCase = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
_lowerCAmelCase = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
_lowerCAmelCase = {
"facebook/s2t-small-librispeech-asr": 1_024,
}
_lowerCAmelCase = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
_lowerCAmelCase = {"mustc": MUSTC_LANGS}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = MAX_MODEL_INPUT_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = []
def __init__( self : Optional[Any] , _A : List[Any] , _A : Tuple , _A : int="<s>" , _A : str="</s>" , _A : List[Any]="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]=False , _A : Optional[Any]=False , _A : List[Any]=None , _A : List[str]=None , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ):
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , do_upper_case=_A , do_lower_case=_A , tgt_lang=_A , lang_codes=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_UpperCamelCase = do_upper_case
_UpperCamelCase = do_lower_case
_UpperCamelCase = load_json(_A )
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = spm_file
_UpperCamelCase = load_spm(_A , self.sp_model_kwargs )
if lang_codes is not None:
_UpperCamelCase = lang_codes
_UpperCamelCase = LANGUAGES[lang_codes]
_UpperCamelCase = [F"""<lang:{lang}>""" for lang in self.langs]
_UpperCamelCase = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
_UpperCamelCase = self.lang_tokens
_UpperCamelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_UpperCamelCase = {}
@property
def UpperCamelCase_ ( self : Optional[int] ):
return len(self.encoder )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(_A )
def UpperCamelCase_ ( self : int , _A : str ):
_UpperCamelCase = self.lang_code_to_id[tgt_lang]
_UpperCamelCase = [lang_code_id]
def UpperCamelCase_ ( self : Optional[int] , _A : str ):
return self.sp_model.encode(_A , out_type=_A )
def UpperCamelCase_ ( self : str , _A : Tuple ):
return self.encoder.get(_A , self.encoder[self.unk_token] )
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
return self.decoder.get(_A , self.unk_token )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] ):
_UpperCamelCase = []
_UpperCamelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_UpperCamelCase = self.sp_model.decode(_A )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_UpperCamelCase = []
else:
current_sub_tokens.append(_A )
_UpperCamelCase = self.sp_model.decode(_A )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[Any]=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
_UpperCamelCase = [1] * len(self.prefix_tokens )
_UpperCamelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : str , _A : Dict ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase_ ( self : str , _A : str , _A : Optional[str] = None ):
_UpperCamelCase = Path(_A )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_UpperCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_UpperCamelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _A )
if os.path.abspath(self.spm_file ) != os.path.abspath(_A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _A )
elif not os.path.isfile(self.spm_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (str(_A ), str(_A ))
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = sentencepiece.SentencePieceProcessor(**__snake_case )
spm.Load(str(__snake_case ) )
return spm
def _snake_case ( __snake_case ):
with open(__snake_case , '''r''' ) as f:
return json.load(__snake_case )
def _snake_case ( __snake_case , __snake_case ):
with open(__snake_case , '''w''' ) as f:
json.dump(__snake_case , __snake_case , indent=2 )
| 71 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether tp freeze the encoder."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase = field(
default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
UpperCAmelCase = field(
default=1024, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__snake_case , __snake_case , __snake_case ):
assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__snake_case , __snake_case ):
_UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCamelCase = SeqaSeqDataset
# Get datasets
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCamelCase = (
build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None
)
_UpperCamelCase = SeqaSeqTrainer(
model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator(
__snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , )
_UpperCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_UpperCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
_UpperCamelCase = data_args.n_val
_UpperCamelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' )
_UpperCamelCase = test_output.metrics
_UpperCamelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCamelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.predict_with_generate:
_UpperCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
_UpperCamelCase = lmap(str.strip , __snake_case )
write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 71 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( __lowercase ):
@staticmethod
@abstractmethod
def UpperCamelCase_ ( _A : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self : Tuple ):
raise NotImplementedError()
| 71 | from __future__ import annotations
import typing
from collections import Counter
def _snake_case ( __snake_case ):
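# brute-force over the two legs; tally every right triangle whose hypotenuse and perimeter are integers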
_UpperCamelCase = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__snake_case , max_perimeter + 1 ):
_UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__snake_case ):
_UpperCamelCase = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _snake_case ( __snake_case = 1000 ):
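# return the perimeter (up to the limit) that admits the most integer right triangles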
_UpperCamelCase = pythagorean_triple(__snake_case )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has the maximum number of solutions')
| 71 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = inspect.getfile(accelerate.test_utils )
_UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
_UpperCamelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
_UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def UpperCamelCase_ ( self : Any ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
_UpperCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase_ ( self : str ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
_UpperCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_UpperCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 10)
_lowerCAmelCase = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCAmelCase = ""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 71 | import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self : Dict ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
_UpperCamelCase = 4
_UpperCamelCase = 32
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = (batch_size, num_channels) + sizes
_UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
_UpperCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
_UpperCamelCase = 128
_UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )
if include_res_hidden_states_tuple:
_UpperCamelCase = torch.manual_seed(1 )
_UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)
if include_encoder_hidden_states:
_UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )
if include_skip_sample:
_UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )
return dummy_input
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_UpperCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
unet_block.to(_A )
unet_block.eval()
with torch.no_grad():
_UpperCamelCase = unet_block(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCamelCase = output[0, -1, -3:, -3:]
_UpperCamelCase = torch.tensor(_A ).to(_A )
assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
model.to(_A )
model.train()
_UpperCamelCase = model(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = randn_tensor(output.shape , device=_A )
_UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
loss.backward()
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 71 | def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''Input value must be an \'int\' type''' )
_UpperCamelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DEISMultistepScheduler,)
UpperCAmelCase = (("num_inference_steps", 25),)
def UpperCamelCase_ ( self : Tuple , **_A : Optional[int] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : Optional[int] , _A : str=0 , **_A : Optional[Any] ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_UpperCamelCase = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase , _UpperCamelCase = sample, sample
for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
_UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : int ):
pass
def UpperCamelCase_ ( self : Optional[Any] , _A : str=0 , **_A : List[str] ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_UpperCamelCase = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[Any] , _A : Any=None , **_A : Optional[Any] ):
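# run a full denoising loop (10 steps) with the dummy model and return the final sample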
if scheduler is None:
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = 10
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
_UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCamelCase = scheduler.timesteps[5]
_UpperCamelCase = scheduler.timesteps[6]
_UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
        # make sure that switching between scheduler classes via from_config (and back)
        # gives identical results for the default config
_UpperCamelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
_UpperCamelCase = self.full_loop(scheduler=_A )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
_UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCamelCase = self.full_loop(scheduler=_A )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def UpperCamelCase_ ( self : Union[str, Any] ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : Optional[Any] ):
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , algorithm_type='''deis''' , solver_order=_A , solver_type=_A , )
def UpperCamelCase_ ( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : List[Any] ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
_UpperCamelCase = self.full_loop(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
                        assert not torch.isnan(_A ).any(), "Samples contain NaN values"
def UpperCamelCase_ ( self : int ):
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def UpperCamelCase_ ( self : int ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A , time_step=0 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.full_loop()
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
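    # fp16 smoke test: stepping a half-precision sample must keep its dtype unchanged.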
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = 10
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A ).prev_sample
assert sample.dtype == torch.floataa
| 71 | import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
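# Copy a single fairseq tensor onto the matching HF module attribute, checking shapes first.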
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_UpperCamelCase = None
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
elif name.split('''.''' )[0] == "proj":
_UpperCamelCase = fairseq_model.proj
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
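# Load one fairseq feature-extractor conv layer (weight or bias) into the HF feature extractor.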
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
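# Build a linear LM head whose weights are tied to the embedding matrix.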
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
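# Parse a fairseq dict.txt into a vocab mapping; ids 0-3 are reserved for the special tokens.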
def _snake_case ( __snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.split(''' ''' )[0] for line in lines]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case )
_UpperCamelCase = SpeechaTextaConfig.from_pretrained(
__snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
_UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case )
_UpperCamelCase = SpeechaTextaForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
_UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case )
_UpperCamelCase = False
# add projection layer
_UpperCamelCase = nn.Parameter(projection_layer.weight )
_UpperCamelCase = nn.Parameter(projection_layer.bias )
_UpperCamelCase = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
_UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''speech_to_text_2'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 71 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = ["model.decoder.embed_positions.weights"]
def _snake_case ( __snake_case ):
if "emb" in name:
_UpperCamelCase = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
_UpperCamelCase = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
_UpperCamelCase = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
_UpperCamelCase = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
_UpperCamelCase = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
_UpperCamelCase = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
_UpperCamelCase = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
_UpperCamelCase = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
_UpperCamelCase = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
_UpperCamelCase = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = list(state_dict.keys() )
_UpperCamelCase = {}
for key in keys:
_UpperCamelCase = state_dict.pop(__snake_case )
_UpperCamelCase = rename_keys(__snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase = val[:hidden_size, :]
_UpperCamelCase = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase = val
else:
_UpperCamelCase = val
return state_dict, enc_dec_proj_state_dict
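# Derive the decoder hyperparameters (hidden size, layers, heads) from the checkpoint name.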
def _snake_case ( __snake_case ):
if checkpoint == "small":
# default config values
_UpperCamelCase = 1024
_UpperCamelCase = 24
_UpperCamelCase = 16
elif checkpoint == "medium":
_UpperCamelCase = 1536
_UpperCamelCase = 48
_UpperCamelCase = 24
elif checkpoint == "large":
_UpperCamelCase = 2048
_UpperCamelCase = 48
_UpperCamelCase = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_UpperCamelCase = MusicgenDecoderConfig(
hidden_size=__snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=__snake_case , num_attention_heads=__snake_case , )
return config
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case=None , __snake_case=None , __snake_case="cpu" ):
_UpperCamelCase = MusicGen.get_pretrained(__snake_case , device=__snake_case )
_UpperCamelCase = decoder_config_from_checkpoint(__snake_case )
_UpperCamelCase = fairseq_model.lm.state_dict()
_UpperCamelCase , _UpperCamelCase = rename_state_dict(
__snake_case , hidden_size=decoder_config.hidden_size )
_UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''' )
_UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
_UpperCamelCase = MusicgenForCausalLM(__snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase , _UpperCamelCase = decoder.load_state_dict(__snake_case , strict=__snake_case )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__snake_case )
if len(__snake_case ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__snake_case ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=__snake_case , audio_encoder=__snake_case , decoder=__snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__snake_case )
# check we can do a forward pass
_UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_UpperCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_UpperCamelCase = model(input_ids=__snake_case , decoder_input_ids=__snake_case ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
_UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''' )
_UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
_UpperCamelCase = MusicgenProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
# set the appropriate bos/pad token ids
_UpperCamelCase = 2048
_UpperCamelCase = 2048
# set other default generation config params
_UpperCamelCase = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase = True
_UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(__snake_case ).mkdir(exist_ok=__snake_case )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__snake_case )
processor.push_to_hub(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
_lowerCAmelCase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 71 | from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.prepare_config_and_inputs()
        (
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
        ) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = TFDebertaVaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_A )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
_UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(_A , attention_mask=_A )[0]
_UpperCamelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
| 71 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_lowerCAmelCase = datasets.load_iris()
_lowerCAmelCase = np.array(data["data"])
_lowerCAmelCase = np.array(data["target"])
_lowerCAmelCase = data["target_names"]
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase = train_test_split(X, y)
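# Euclidean (L2) distance between two feature vectors.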
def _snake_case ( __snake_case , __snake_case ):
return np.linalg.norm(np.array(__snake_case ) - np.array(__snake_case ) )
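# k-nearest-neighbour classifier: predict the majority class among the k closest training points.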
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=5 ):
_UpperCamelCase = zip(__snake_case , __snake_case )
# List of distances of all points from the point to be classified
_UpperCamelCase = []
for data_point in data:
_UpperCamelCase = euclidean_distance(data_point[0] , __snake_case )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
_UpperCamelCase = [i[1] for i in sorted(__snake_case )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
_UpperCamelCase = Counter(__snake_case ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 71 | def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    # BFS over the residual graph: return True if the sink t is reachable from the source s
_UpperCamelCase = [False] * len(__snake_case )
_UpperCamelCase = []
queue.append(__snake_case )
_UpperCamelCase = True
while queue:
_UpperCamelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__snake_case )
_UpperCamelCase = True
_UpperCamelCase = u
return visited[t]
def _snake_case ( __snake_case , __snake_case , __snake_case ):
    # parent[] is filled by BFS and used to reconstruct each augmenting path
_UpperCamelCase = [-1] * (len(__snake_case ))
_UpperCamelCase = 0
while bfs(__snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = float('''Inf''' )
_UpperCamelCase = sink
while s != source:
            # Find the bottleneck (minimum residual capacity) along the augmenting path
_UpperCamelCase = min(__snake_case , graph[parent[s]][s] )
_UpperCamelCase = parent[s]
max_flow += path_flow
_UpperCamelCase = sink
while v != source:
_UpperCamelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase = parent[v]
return max_flow
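# Example capacity network (the classic CLRS 6-node graph); the max flow from node 0 to node 5 is 23.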
_lowerCAmelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_lowerCAmelCase, _lowerCAmelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
| 71 | 1 |
def _snake_case ( __snake_case , __snake_case , __snake_case ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod
else:
        _UpperCamelCase = binary_exponentiation(__snake_case , n // 2 , __snake_case )  # integer division keeps the exponent an int
return (b * b) % mod
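# Worked example: binary_exponentiation(3, 5, 7) == 243 % 7 == 5.
# Fermat's little theorem: for prime p, (a / b) % p == (a * b**(p - 2)) % p, as checked below.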
# a prime number
_lowerCAmelCase = 701
_lowerCAmelCase = 1_000_000_000
_lowerCAmelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 71 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
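# Lazy import structure: the heavy torch modules are only loaded on first attribute access.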
_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["image_processor"]
UpperCAmelCase = "SamImageProcessor"
def __init__( self : List[str] , _A : int ):
super().__init__(_A )
_UpperCamelCase = self.image_processor
_UpperCamelCase = -10
_UpperCamelCase = self.image_processor.size['''longest_edge''']
def __call__( self : Optional[Any] , _A : Tuple=None , _A : List[Any]=None , _A : Dict=None , _A : Any=None , _A : Optional[Union[str, TensorType]] = None , **_A : List[str] , ):
_UpperCamelCase = self.image_processor(
_A , return_tensors=_A , **_A , )
        # pop arguments that are not used in the forward pass but are still needed for post-processing
_UpperCamelCase = encoding_image_processor['''original_sizes''']
if hasattr(_A , '''numpy''' ): # Checks if Torch or TF tensor
_UpperCamelCase = original_sizes.numpy()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._check_and_preprocess_points(
input_points=_A , input_labels=_A , input_boxes=_A , )
_UpperCamelCase = self._normalize_and_convert(
_A , _A , input_points=_A , input_labels=_A , input_boxes=_A , return_tensors=_A , )
return encoding_image_processor
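    # Rescale prompt inputs (points, labels, boxes) to the resized image and convert them to tensors.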
def UpperCamelCase_ ( self : str , _A : List[Any] , _A : Optional[int] , _A : Union[str, Any]=None , _A : Optional[int]=None , _A : Optional[Any]=None , _A : Dict="pt" , ):
if input_points is not None:
if len(_A ) != len(_A ):
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] ) for point in input_points
]
else:
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , _A , _A )
for point, original_size in zip(_A , _A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_UpperCamelCase , _UpperCamelCase = self._pad_points_and_labels(_A , _A )
_UpperCamelCase = np.array(_A )
if input_labels is not None:
_UpperCamelCase = np.array(_A )
if input_boxes is not None:
if len(_A ) != len(_A ):
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] , is_bounding_box=_A )
for box in input_boxes
]
else:
_UpperCamelCase = [
self._normalize_coordinates(self.target_size , _A , _A , is_bounding_box=_A )
for box, original_size in zip(_A , _A )
]
_UpperCamelCase = np.array(_A )
if input_boxes is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(_A )
# boxes batch size of 1 by default
_UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(_A )
# boxes batch size of 1 by default
_UpperCamelCase = tf.expand_dims(_A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(_A )
# point batch size of 1 by default
_UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(_A )
# point batch size of 1 by default
_UpperCamelCase = tf.expand_dims(_A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_UpperCamelCase = torch.from_numpy(_A )
# point batch size of 1 by default
_UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_UpperCamelCase = tf.convert_to_tensor(_A )
# point batch size of 1 by default
_UpperCamelCase = tf.expand_dims(_A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def UpperCamelCase_ ( self : Any , _A : str , _A : Dict ):
_UpperCamelCase = max([point.shape[0] for point in input_points] )
_UpperCamelCase = []
for i, point in enumerate(_A ):
if point.shape[0] != expected_nb_points:
_UpperCamelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_UpperCamelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_A )
_UpperCamelCase = processed_input_points
return input_points, input_labels
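    # Map coordinates from the original image resolution to the model's resized input resolution.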
def UpperCamelCase_ ( self : List[Any] , _A : int , _A : np.ndarray , _A : Any , _A : Dict=False ):
_UpperCamelCase , _UpperCamelCase = original_size
_UpperCamelCase , _UpperCamelCase = self.image_processor._get_preprocess_shape(_A , longest_edge=_A )
_UpperCamelCase = deepcopy(_A ).astype(_A )
if is_bounding_box:
_UpperCamelCase = coords.reshape(-1 , 2 , 2 )
_UpperCamelCase = coords[..., 0] * (new_w / old_w)
_UpperCamelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_UpperCamelCase = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase_ ( self : Any , _A : Optional[int]=None , _A : List[str]=None , _A : Optional[int]=None , ):
if input_points is not None:
if hasattr(_A , '''numpy''' ): # Checks for TF or Torch tensor
_UpperCamelCase = input_points.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_points[0] , _A ):
raise ValueError('''Input points must be a list of list of floating points.''' )
_UpperCamelCase = [np.array(_A ) for input_point in input_points]
else:
_UpperCamelCase = None
if input_labels is not None:
if hasattr(_A , '''numpy''' ):
_UpperCamelCase = input_labels.numpy().tolist()
if not isinstance(_A , _A ) or not isinstance(input_labels[0] , _A ):
raise ValueError('''Input labels must be a list of list integers.''' )
_UpperCamelCase = [np.array(_A ) for label in input_labels]
else:
_UpperCamelCase = None
if input_boxes is not None:
if hasattr(_A , '''numpy''' ):
_UpperCamelCase = input_boxes.numpy().tolist()
if (
not isinstance(_A , _A )
or not isinstance(input_boxes[0] , _A )
or not isinstance(input_boxes[0][0] , _A )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
_UpperCamelCase = [np.array(_A ).astype(np.floataa ) for box in input_boxes]
else:
_UpperCamelCase = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(_A ) )
def UpperCamelCase_ ( self : List[Any] , *_A : Union[str, Any] , **_A : Optional[Any] ):
return self.image_processor.post_process_masks(*_A , **_A )
| 71 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
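    # Save/reload round-trip: tokenizer and image processor must survive save_pretrained + from_pretrained.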
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
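# MarkupLM configuration, including the XPath tag/subscript embedding hyperparameters.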
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "markuplm"
def __init__( self : Optional[int] , _A : Dict=3_0522 , _A : Union[str, Any]=768 , _A : Dict=12 , _A : Union[str, Any]=12 , _A : Optional[int]=3072 , _A : List[str]="gelu" , _A : Optional[Any]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[int]=2 , _A : Optional[Any]=0.02 , _A : Dict=1e-12 , _A : Dict=0 , _A : List[str]=0 , _A : Any=2 , _A : Tuple=256 , _A : Tuple=1024 , _A : str=216 , _A : str=1001 , _A : Any=32 , _A : Any=50 , _A : Optional[Any]="absolute" , _A : Tuple=True , _A : Any=None , **_A : Any , ):
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A , )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
# additional properties
_UpperCamelCase = max_depth
_UpperCamelCase = max_xpath_tag_unit_embeddings
_UpperCamelCase = max_xpath_subs_unit_embeddings
_UpperCamelCase = tag_pad_id
_UpperCamelCase = subs_pad_id
_UpperCamelCase = xpath_unit_hidden_size
| 71 | def _snake_case ( __snake_case , __snake_case , __snake_case ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__snake_case , n - 1 , __snake_case ) * a) % mod
else:
        _UpperCamelCase = binary_exponentiation(__snake_case , n // 2 , __snake_case )
return (b * b) % mod
# a prime number
_lowerCAmelCase = 701
_lowerCAmelCase = 1_000_000_000
_lowerCAmelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 71 | 1 |
from math import sqrt
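# Project Euler 86: find the smallest cuboid size M such that the number of cuboids with an
# integer-length shortest surface path exceeds the given limit.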
def _snake_case ( __snake_case = 1000000 ):
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 71 | from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
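# Each helper below builds a second-order (biquad) IIR filter using the standard
# RBJ Audio EQ Cookbook coefficient formulas. First: the low-pass filter.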
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = (1 - _cos) / 2
_UpperCamelCase = 1 - _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
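# High-pass biquad: attenuates content below the cutoff frequency.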
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = (1 + _cos) / 2
_UpperCamelCase = -1 - _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
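# Band-pass biquad centred on the given frequency; the bandwidth is set by q_factor.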
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = _sin / 2
_UpperCamelCase = 0
_UpperCamelCase = -ba
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
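# All-pass biquad: flat magnitude response with a frequency-dependent phase shift.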
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 1 - alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
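# Peaking EQ biquad: boosts or cuts gain_db in a band around the given frequency.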
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = 1 + alpha * big_a
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha * big_a
_UpperCamelCase = 1 + alpha / big_a
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha / big_a
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
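# Low-shelf biquad: boosts or cuts everything below the given frequency by gain_db.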
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
_UpperCamelCase = big_a * (pmc + aaa)
_UpperCamelCase = 2 * big_a * mpc
_UpperCamelCase = big_a * (pmc - aaa)
_UpperCamelCase = ppmc + aaa
_UpperCamelCase = -2 * pmpc
_UpperCamelCase = ppmc - aaa
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
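# High-shelf biquad: boosts or cuts everything above the given frequency by gain_db.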
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
_UpperCamelCase = big_a * (ppmc + aaa)
_UpperCamelCase = -2 * big_a * pmpc
_UpperCamelCase = big_a * (ppmc - aaa)
_UpperCamelCase = pmc + aaa
_UpperCamelCase = 2 * mpc
_UpperCamelCase = pmc - aaa
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 71 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=99 , _A : Optional[Any]=13 , _A : List[str]=7 , _A : List[Any]=9 , _A : Dict=True , _A : str=True , _A : List[Any]=False , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : List[str]=4 , _A : Tuple=37 , _A : Optional[int]=8 , _A : Optional[int]=0.1 , _A : Union[str, Any]=0.002 , _A : Optional[Any]=1 , _A : List[Any]=0 , _A : List[str]=0 , _A : Dict=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = encoder_seq_length
_UpperCamelCase = decoder_seq_length
# For common tests
_UpperCamelCase = self.decoder_seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = d_ff
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = dropout_rate
_UpperCamelCase = initializer_factor
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = decoder_start_token_id
_UpperCamelCase = None
_UpperCamelCase = decoder_layers
def UpperCamelCase_ ( self : Optional[int] ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : Dict , _A : int , _A : str=None , _A : Dict=None , _A : List[str]=None , _A : Any=None , _A : str=None , ):
if attention_mask is None:
_UpperCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_A )
if decoder_head_mask is None:
_UpperCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_A )
if cross_attn_head_mask is None:
_UpperCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
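    # Draw random token ids for encoder and decoder and assemble the full model input dict.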
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase = self.get_config()
_UpperCamelCase = config.num_attention_heads
_UpperCamelCase = self.prepare_inputs_dict(_A , _A , _A )
return config, input_dict
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ ( self : Optional[int] ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase_ ( self : int ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Any , _A : Optional[int] , ):
_UpperCamelCase = UMTaModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(
input_ids=_A , decoder_input_ids=_A , attention_mask=_A , decoder_attention_mask=_A , )
_UpperCamelCase = model(input_ids=_A , decoder_input_ids=_A )
_UpperCamelCase = result.last_hidden_state
_UpperCamelCase = result.past_key_values
_UpperCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_A ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def UpperCamelCase_ ( self : List[Any] , _A : int , _A : str , _A : List[str] , _A : List[Any] , _A : Any , _A : Optional[Any] , ):
_UpperCamelCase = UMTaModel(config=_A ).get_decoder().to(_A ).eval()
# first forward pass
_UpperCamelCase = model(_A , use_cache=_A )
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , use_cache=_A )
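        # note (added for clarity): a forward pass with the cache enabled also returns
        # `past_key_values`, so the call that disables the cache yields one fewer output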
self.parent.assertTrue(len(_A ) == len(_A ) )
self.parent.assertTrue(len(_A ) == len(_A ) + 1 )
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new token to input_ids
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase = model(_A )['''last_hidden_state''']
_UpperCamelCase = model(_A , past_key_values=_A )['''last_hidden_state''']
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[Any] , _A : Any , ):
_UpperCamelCase = UMTaModel(config=_A ).to(_A ).half().eval()
_UpperCamelCase = model(**_A )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(_A ).any().item() )
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase = [0.8, 0.9]
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = UMTaModel(config_and_inputs[0] ).to(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=_A , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase = config_and_inputs[0]
_UpperCamelCase = UMTaForConditionalGeneration(_A ).eval()
model.to(_A )
_UpperCamelCase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=_A ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=_A ),
}
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
_UpperCamelCase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
_UpperCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=_A )
_UpperCamelCase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=_A , return_dict_in_generate=_A , **_A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=_A , legacy=_A )
_UpperCamelCase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_UpperCamelCase = tokenizer(_A , return_tensors='''pt''' , padding=_A ).input_ids
# fmt: off
_UpperCamelCase = torch.tensor(
[
            [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_A , _A )
_UpperCamelCase = model.generate(input_ids.to(_A ) )
_UpperCamelCase = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertEqual(_A , _A )
| 71 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
_UpperCamelCase = self.rope_scaling.get('''type''' , _A )
_UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
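# Hedged usage sketch (illustrative only; assumes this config class is exported under
# its upstream name GPTNeoXConfig). A valid `rope_scaling` dict has exactly a `type`
# in {"linear", "dynamic"} and a float `factor` > 1:
#
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
#   config = GPTNeoXConfig(rope_scaling={"type": "linear"})                 # raises ValueError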
| 71 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "PoolFormerConfig"
# Base docstring
_lowerCAmelCase = "sail/poolformer_s12"
_lowerCAmelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCAmelCase = "sail/poolformer_s12"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _snake_case ( __snake_case , __snake_case = 0.0 , __snake_case = False ):
if drop_prob == 0.0 or not training:
return input
_UpperCamelCase = 1 - drop_prob
_UpperCamelCase = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_UpperCamelCase = keep_prob + torch.rand(__snake_case , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_UpperCamelCase = input.div(__snake_case ) * random_tensor
return output
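# Illustrative behavior sketch (comments only; assumes the helper keeps its upstream
# name `drop_path`, as the call inside PoolFormerDropPath below suggests): with
# drop_prob=0.2 during training, each sample's branch is either zeroed or rescaled by
# 1 / keep_prob, leaving the expected activation unchanged, e.g.
#
#   x = torch.ones(4, 3, 8, 8)
#   out = drop_path(x, 0.2, True)
#   # every sample in `out` is now either all zeros or all 1 / 0.8 = 1.25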
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : int , _A : Optional[float] = None ):
super().__init__()
_UpperCamelCase = drop_prob
def UpperCamelCase_ ( self : Union[str, Any] , _A : torch.Tensor ):
return drop_path(_A , self.drop_prob , self.training )
def UpperCamelCase_ ( self : List[str] ):
return "p={}".format(self.drop_prob )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : List[Any] , _A : int , _A : Dict , _A : List[str] , _A : Any , _A : List[str] , _A : Union[str, Any]=None ):
super().__init__()
_UpperCamelCase = patch_size if isinstance(_A , collections.abc.Iterable ) else (patch_size, patch_size)
_UpperCamelCase = stride if isinstance(_A , collections.abc.Iterable ) else (stride, stride)
_UpperCamelCase = padding if isinstance(_A , collections.abc.Iterable ) else (padding, padding)
_UpperCamelCase = nn.Convad(_A , _A , kernel_size=_A , stride=_A , padding=_A )
_UpperCamelCase = norm_layer(_A ) if norm_layer else nn.Identity()
def UpperCamelCase_ ( self : str , _A : Union[str, Any] ):
_UpperCamelCase = self.projection(_A )
_UpperCamelCase = self.norm(_A )
return embeddings
class lowerCAmelCase_ ( nn.GroupNorm ):
def __init__( self : Optional[Any] , _A : Any , **_A : Union[str, Any] ):
super().__init__(1 , _A , **_A )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Any , _A : List[Any] ):
super().__init__()
_UpperCamelCase = nn.AvgPoolad(_A , stride=1 , padding=pool_size // 2 , count_include_pad=_A )
def UpperCamelCase_ ( self : str , _A : Optional[int] ):
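        # PoolFormer's token mixer: average pooling minus the identity, so the residual
        # connection added by the caller amounts to (pool(x) - x) + x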
return self.pool(_A ) - hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : Any , _A : str , _A : Tuple , _A : List[str] , _A : int ):
super().__init__()
_UpperCamelCase = nn.Convad(_A , _A , 1 )
_UpperCamelCase = nn.Convad(_A , _A , 1 )
_UpperCamelCase = PoolFormerDropPath(_A )
if isinstance(config.hidden_act , _A ):
_UpperCamelCase = ACTaFN[config.hidden_act]
else:
_UpperCamelCase = config.hidden_act
def UpperCamelCase_ ( self : int , _A : List[str] ):
_UpperCamelCase = self.conva(_A )
_UpperCamelCase = self.act_fn(_A )
_UpperCamelCase = self.drop(_A )
_UpperCamelCase = self.conva(_A )
_UpperCamelCase = self.drop(_A )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : List[str] , _A : Union[str, Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Optional[int] , _A : List[str] , _A : Tuple ):
super().__init__()
_UpperCamelCase = PoolFormerPooling(_A )
_UpperCamelCase = PoolFormerOutput(_A , _A , _A , _A )
_UpperCamelCase = PoolFormerGroupNorm(_A )
_UpperCamelCase = PoolFormerGroupNorm(_A )
# Useful for training neural nets
_UpperCamelCase = PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
_UpperCamelCase = config.use_layer_scale
if config.use_layer_scale:
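            # LayerScale: learnable per-channel scales on both residual branches,
            # initialized to a small value so each block starts close to the identity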
_UpperCamelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
_UpperCamelCase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
def UpperCamelCase_ ( self : int , _A : List[Any] ):
if self.use_layer_scale:
_UpperCamelCase = self.pooling(self.before_norm(_A ) )
_UpperCamelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_UpperCamelCase = hidden_states + self.drop_path(_A )
_UpperCamelCase = ()
_UpperCamelCase = self.output(self.after_norm(_A ) )
_UpperCamelCase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_UpperCamelCase = hidden_states + self.drop_path(_A )
_UpperCamelCase = (output,) + outputs
return outputs
else:
_UpperCamelCase = self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
_UpperCamelCase = pooling_output + hidden_states
_UpperCamelCase = ()
# Second residual connection inside the PoolFormerOutput block
_UpperCamelCase = self.drop_path(self.output(self.after_norm(_A ) ) )
_UpperCamelCase = hidden_states + layer_output
_UpperCamelCase = (output,) + outputs
return outputs
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : int , _A : Any ):
super().__init__()
_UpperCamelCase = config
# stochastic depth decay rule
_UpperCamelCase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_UpperCamelCase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_UpperCamelCase = nn.ModuleList(_A )
# Transformer blocks
_UpperCamelCase = []
_UpperCamelCase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_UpperCamelCase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_A ) )
_UpperCamelCase = nn.ModuleList(_A )
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : str=False , _A : List[Any]=True ):
_UpperCamelCase = () if output_hidden_states else None
_UpperCamelCase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_UpperCamelCase , _UpperCamelCase = layers
# Get patch embeddings from hidden_states
_UpperCamelCase = embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
_UpperCamelCase = blk(_A )
_UpperCamelCase = layer_outputs[0]
if output_hidden_states:
_UpperCamelCase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = PoolFormerConfig
UpperCAmelCase = "poolformer"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = True
def UpperCamelCase_ ( self : Tuple , _A : Any ):
if isinstance(_A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Optional[Any]=False ):
if isinstance(_A , _A ):
_UpperCamelCase = value
_lowerCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Dict , _A : Optional[Any] ):
super().__init__(_A )
_UpperCamelCase = config
_UpperCamelCase = PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase_ ( self : int ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : Optional[torch.FloatTensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , )
_UpperCamelCase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( nn.Module ):
def __init__( self : str , _A : List[str] ):
super().__init__()
_UpperCamelCase = nn.Linear(config.hidden_size , config.hidden_size )
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = self.dense(_A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Dict , _A : int ):
super().__init__(_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = PoolFormerModel(_A )
# Final norm
_UpperCamelCase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_UpperCamelCase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.LongTensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ):
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.poolformer(
_A , output_hidden_states=_A , return_dict=_A , )
_UpperCamelCase = outputs[0]
_UpperCamelCase = self.classifier(self.norm(_A ).mean([-2, -1] ) )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = '''single_label_classification'''
else:
_UpperCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(_A , _A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 71 | from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__lowercase ):
UpperCAmelCase = ["keras_nlp"]
def __init__( self : Any , *_A : Dict , **_A : List[str] ):
requires_backends(self , ['''keras_nlp'''] )
| 71 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def _snake_case ( __snake_case , __snake_case=None ):
require_version(deps[pkg] , __snake_case )
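# Hedged usage sketch (illustrative; upstream exposes this helper as dep_version_check):
#
#   dep_version_check("tokenizers")  # raises if the installed version violates the pin in `deps`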
| 71 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
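# The two residual blocks below differ only in that the second (Y) variant inserts the
# squeeze-and-excitation layer above before its final 1x1 convolution.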
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
        # Change to NCHW output format to have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
# classification head
_UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier[0](_A )
_UpperCamelCase = self.classifier[1](_A )
_UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 71 | 1 |
def _snake_case ( __snake_case ):
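    """Sort a list in place in ascending order using cocktail shaker sort.

    Illustrative doctests (added here; not part of the original file):
    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """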
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
_UpperCamelCase = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_UpperCamelCase , _UpperCamelCase = unsorted[j - 1], unsorted[j]
_UpperCamelCase = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
_UpperCamelCase , _UpperCamelCase = unsorted[j + 1], unsorted[j]
_UpperCamelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
_lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(f'{cocktail_shaker_sort(unsorted) = }')
| 71 | from sklearn.metrics import mean_squared_error
import datasets
_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def UpperCamelCase_ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ):
_UpperCamelCase = mean_squared_error(
_A , _A , sample_weight=_A , multioutput=_A , squared=_A )
return {"mse": mse}
| 71 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = None
class lowerCAmelCase_ ( __lowercase, __lowercase ):
UpperCAmelCase = 2
@register_to_config
def __init__( self : Any , _A : float = 0.02 , _A : float = 100 , _A : float = 1.007 , _A : float = 80 , _A : float = 0.05 , _A : float = 50 , ):
# standard deviation of the initial noise distribution
_UpperCamelCase = sigma_max
# setable values
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None # sigma(t_i)
def UpperCamelCase_ ( self : Union[str, Any] , _A : torch.FloatTensor , _A : Optional[int] = None ):
return sample
def UpperCamelCase_ ( self : Tuple , _A : int , _A : Union[str, torch.device] = None ):
_UpperCamelCase = num_inference_steps
_UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
_UpperCamelCase = torch.from_numpy(_A ).to(_A )
_UpperCamelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_UpperCamelCase = torch.tensor(_A , dtype=torch.floataa , device=_A )
def UpperCamelCase_ ( self : Dict , _A : torch.FloatTensor , _A : float , _A : Optional[torch.Generator] = None ):
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCamelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=_A ).to(sample.device )
_UpperCamelCase = sigma + gamma * sigma
_UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase_ ( self : Optional[Any] , _A : torch.FloatTensor , _A : float , _A : float , _A : torch.FloatTensor , _A : bool = True , ):
_UpperCamelCase = sample_hat + sigma_hat * model_output
_UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat
_UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_A , derivative=_A , pred_original_sample=_A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : torch.FloatTensor , _A : float , _A : float , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : bool = True , ):
_UpperCamelCase = sample_prev + sigma_prev * model_output
_UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev
_UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_A , derivative=_A , pred_original_sample=_A )
def UpperCamelCase_ ( self : Dict , _A : List[Any] , _A : int , _A : Tuple ):
raise NotImplementedError()
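# Hedged sampling sketch (illustrative; names follow the public diffusers
# KarrasVeScheduler API — set_timesteps, schedule, add_noise_to_input, step — and
# `unet` stands in for a hypothetical denoising model):
#
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample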
| 71 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 71 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = image.size
_UpperCamelCase , _UpperCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
_UpperCamelCase = np.array(__snake_case ).astype(np.floataa ) / 255.0
_UpperCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCamelCase = torch.from_numpy(__snake_case )
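    # map pixel values from [0, 1] to [-1, 1], the input range the latent model expects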
return 2.0 * image - 1.0
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : str , _A : VQModel , _A : UNetaDModel , _A : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=_A , unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : List[str] , _A : Union[torch.Tensor, PIL.Image.Image] = None , _A : Optional[int] = 1 , _A : Optional[int] = 100 , _A : Optional[float] = 0.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[str] = "pil" , _A : bool = True , ):
if isinstance(_A , PIL.Image.Image ):
_UpperCamelCase = 1
elif isinstance(_A , torch.Tensor ):
_UpperCamelCase = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_A )}""" )
if isinstance(_A , PIL.Image.Image ):
_UpperCamelCase = preprocess(_A )
_UpperCamelCase , _UpperCamelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCamelCase = next(self.unet.parameters() ).dtype
_UpperCamelCase = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
_UpperCamelCase = image.to(device=self.device , dtype=_A )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_A , device=self.device )
_UpperCamelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
for t in self.progress_bar(_A ):
# concat latents and low resolution image in the channel dimension.
_UpperCamelCase = torch.cat([latents, image] , dim=1 )
_UpperCamelCase = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
_UpperCamelCase = self.unet(_A , _A ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# decode the image latents with the VQVAE
_UpperCamelCase = self.vqvae.decode(_A ).sample
_UpperCamelCase = torch.clamp(_A , -1.0 , 1.0 )
_UpperCamelCase = image / 2 + 0.5
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
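# Hedged usage sketch (illustrative; assumes this pipeline is exported under its
# upstream name LDMSuperResolutionPipeline with a matching checkpoint on the Hub):
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]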
| 71 | from __future__ import annotations
import math
class lowerCAmelCase_ :
def __init__( self : int , _A : int ):
_UpperCamelCase = size
        # approximate the overall size of the segment tree with the given value
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
_UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
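        # lazy propagation bookkeeping: the lazy array holds a pending range assignment
        # for each subtree, and the flag marks whether that value still has to be
        # pushed down to the children before they are read or updated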
def UpperCamelCase_ ( self : str , _A : int ):
return idx * 2
def UpperCamelCase_ ( self : Any , _A : int ):
return idx * 2 + 1
def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ):
if left_element == right_element:
_UpperCamelCase = a[left_element - 1]
else:
_UpperCamelCase = (left_element + right_element) // 2
self.build(self.left(_A ) , _A , _A , _A )
self.build(self.right(_A ) , mid + 1 , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ):
        # push a pending lazy assignment down to the children before using this node
        if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
        if right_element < a or left_element > b:  # no overlap with the update range [a, b]
            return True
        if left_element >= a and right_element <= b:  # total overlap: record the assignment lazily
_UpperCamelCase = val
if left_element != right_element:
_UpperCamelCase = val
_UpperCamelCase = val
_UpperCamelCase = True
_UpperCamelCase = True
return True
_UpperCamelCase = (left_element + right_element) // 2
self.update(self.left(_A ) , _A , _A , _A , _A , _A )
self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
return True
def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ):
        # push a pending lazy assignment down before answering from this node
        if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
        if right_element < a or left_element > b:  # no overlap with the query range [a, b]
            return -math.inf
        if left_element >= a and right_element <= b:  # total overlap: the stored maximum is exact
return self.segment_tree[idx]
_UpperCamelCase = (left_element + right_element) // 2
_UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A )
_UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A )
return max(_A , _A )
def __str__( self : Tuple ):
return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_lowerCAmelCase = 15
_lowerCAmelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
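# Expected output of the demo above (1-indexed, inclusive ranges over the array):
#   max(a[4..6])   = max(7, 3, -5)              -> 7
#   max(a[7..11])  = max(6, 11, -20, 9, 14)     -> 14
#   max(a[7..12])  = max(6, 11, -20, 9, 14, 15) -> 15
#   after assigning 111 to a[1..3]: max(a[1..15]) -> 111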
| 71 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
_lowerCAmelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
_lowerCAmelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = SavedModel()
_UpperCamelCase = []
with open(os.path.join(__snake_case , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
_UpperCamelCase = json.load(__snake_case )['''opsets''']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__snake_case )] )
with open(__snake_case , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
_UpperCamelCase = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_UpperCamelCase = sorted(__snake_case )
_UpperCamelCase = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__snake_case )
if strict and len(__snake_case ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(__snake_case ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*__snake_case , sep='''\n''' )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
_lowerCAmelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
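# Illustrative invocation of the checker above (script name and paths are placeholders):
#   python check_tf_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict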
| 71 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
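# A minimal, self-contained sketch of the deferred-import idea behind `_LazyModule`
# above: the wrapped module is imported only on first attribute access, so importing
# the package stays cheap. This is an illustration, not the actual `_LazyModule`
# implementation.
import importlib

class _LazyHandle:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # only reached for attributes not found normally, i.e. anything but
        # `_module_name` and `_module`
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

# `json` is imported only once an attribute is first touched:
lazy_json = _LazyHandle("json")
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'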
| 71 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "funnel"
UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : Optional[int] , _A : List[Any]=3_0522 , _A : Dict=[4, 4, 4] , _A : List[Any]=None , _A : Tuple=2 , _A : int=768 , _A : Dict=12 , _A : List[str]=64 , _A : Optional[int]=3072 , _A : List[Any]="gelu_new" , _A : List[str]=0.1 , _A : str=0.1 , _A : Any=0.0 , _A : List[str]=0.1 , _A : Optional[Any]=None , _A : Optional[Any]=1e-9 , _A : Optional[Any]="mean" , _A : List[str]="relative_shift" , _A : Optional[Any]=True , _A : Optional[int]=True , _A : Optional[int]=True , **_A : str , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = block_sizes
_UpperCamelCase = [1] * len(_A ) if block_repeats is None else block_repeats
assert len(_A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_UpperCamelCase = num_decoder_layers
_UpperCamelCase = d_model
_UpperCamelCase = n_head
_UpperCamelCase = d_head
_UpperCamelCase = d_inner
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = initializer_std
_UpperCamelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_UpperCamelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_UpperCamelCase = attention_type
_UpperCamelCase = separate_cls
_UpperCamelCase = truncate_seq
_UpperCamelCase = pool_q_only
super().__init__(**_A )
@property
def UpperCamelCase_ ( self : int ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCamelCase_ ( self : List[Any] , _A : Optional[int] ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def UpperCamelCase_ ( self : Tuple ):
return len(self.block_sizes )
@num_blocks.setter
def UpperCamelCase_ ( self : List[Any] , _A : Optional[int] ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
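# Hypothetical usage, assuming the configuration class above were exposed under its
# original name `FunnelConfig`:
#   config = FunnelConfig(block_sizes=[4, 4, 4])
#   config.num_hidden_layers  # -> 12, always the sum of `block_sizes`
#   config.num_blocks         # -> 3, always len(block_sizes)
# Both properties reject direct assignment and point the caller at `block_sizes`.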
| 71 | import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = field
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = Json(
cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCamelCase = num_proc
_UpperCamelCase = '''utf-8'''
_UpperCamelCase = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
        _UpperCamelCase = self.to_json_kwargs.pop('''lines''' , orient == '''records''' )
        _UpperCamelCase = self.to_json_kwargs.pop('''index''' , orient not in ['''split''', '''table'''] )
_UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
_UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args
_UpperCamelCase = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ):
_UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_A )
else:
_UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_A )
return written
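# The progress-bar `total` computed above is just ceiling division written out; an
# equivalent, self-contained form (the helper name is illustrative):
import math

def num_batches(num_rows, batch_size):
    # integer ceiling of num_rows / batch_size
    return (num_rows + batch_size - 1) // batch_size

assert num_batches(10, 4) == math.ceil(10 / 4) == 3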
| 71 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
def UpperCamelCase_ ( self : Union[str, Any] ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase_ ( self : Tuple ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase_ ( self : List[str] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = torch.arange(self.height * self.width )
_UpperCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(_A , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase , *_UpperCamelCase = self.shape
_UpperCamelCase = int(np.prod(_A ) )
_UpperCamelCase = self.get_image_coords()
_UpperCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_UpperCamelCase = self.get_camera_rays(_A )
_UpperCamelCase = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase_ ( self : Optional[Any] , _A : torch.Tensor ):
_UpperCamelCase , *_UpperCamelCase , _UpperCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_UpperCamelCase = coords.view(_A , -1 , 2 )
_UpperCamelCase = self.resolution()
_UpperCamelCase = self.fov()
_UpperCamelCase = (flat.float() / (res - 1)) * 2 - 1
_UpperCamelCase = fracs * torch.tan(fov / 2 )
_UpperCamelCase = fracs.view(_A , -1 , 2 )
_UpperCamelCase = (
self.z.view(_A , 1 , 3 )
+ self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
)
_UpperCamelCase = directions / directions.norm(dim=-1 , keepdim=_A )
_UpperCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_A , *_A , 2 , 3 )
def UpperCamelCase_ ( self : Tuple , _A : int , _A : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def _snake_case ( __snake_case ):
    # place 20 cameras evenly on a circle around the origin, each looking inward (and slightly down)
    _UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_UpperCamelCase = np.array([np.sin(__snake_case ), np.cos(__snake_case ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_UpperCamelCase = -z * 4
_UpperCamelCase = np.array([np.cos(__snake_case ), -np.sin(__snake_case ), 0.0] )
_UpperCamelCase = np.cross(__snake_case , __snake_case )
origins.append(__snake_case )
xs.append(__snake_case )
ys.append(__snake_case )
zs.append(__snake_case )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__snake_case , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__snake_case , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__snake_case , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__snake_case , axis=0 ) ).float() , width=__snake_case , height=__snake_case , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__snake_case )) , )
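# Hypothetical usage, assuming the helper above kept its original name
# `create_pan_cameras`: it returns 20 camera poses spaced evenly on a circle,
# all aimed at the origin, with square `size` x `size` viewports.
#   cameras = create_pan_cameras(64)
#   rays = cameras.camera_rays  # per-pixel ray origins and unit directions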
| 71 | import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( enum.Enum ):
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 2
@add_end_docstrings(__lowercase )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        '''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
                        ''' model\'s max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
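# Typical use of the pipeline above through the high-level factory (the model name
# is just an example):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)
#   # -> [{"generated_text": "Hello, I'm a language model, ..."}]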
| 71 | 1 |
_lowerCAmelCase = 8.3144598  # universal gas constant R, in J/(mol*K)
def _snake_case ( __snake_case , __snake_case ):
    # v_rms = sqrt(3 * R * T / M): temperature in kelvin, molar mass in kg/mol
    if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    _lowerCAmelCase = 300  # temperature in kelvin
    _lowerCAmelCase = 0.028  # molar mass of N2 in kg/mol; the formula expects SI units (28 would be g/mol)
_lowerCAmelCase = rms_speed_of_molecule(temperature, molar_mass)
print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 71 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
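# Note on the check above: `loss` is the mean cross-entropy per target token, so
# `-(labels.shape[-1] * loss.item())` recovers the summed log-likelihood of the
# target sequence, which is what makes it comparable to the reference score from
# the original Mesh TensorFlow (mtf) implementation.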
| 71 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
# Change to NCHW output format have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
# classification head
_UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier[0](_A )
_UpperCamelCase = self.classifier[1](_A )
_UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
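# Hypothetical usage of the classification model above via the library's public
# classes (checkpoint name taken from the docstrings above):
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits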
| 71 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase = field(
default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
UpperCAmelCase = field(
default=1024, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__snake_case , __snake_case , __snake_case ):
assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__snake_case , __snake_case ):
_UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCamelCase = SeqaSeqDataset
# Get datasets
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCamelCase = (
build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None
)
_UpperCamelCase = SeqaSeqTrainer(
model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator(
__snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , )
_UpperCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_UpperCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
_UpperCamelCase = data_args.n_val
_UpperCamelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' )
_UpperCamelCase = test_output.metrics
_UpperCamelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCamelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.predict_with_generate:
_UpperCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
_UpperCamelCase = lmap(str.strip , __snake_case )
write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
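# Illustrative launch of the fine-tuning script above (script name, paths and
# hyperparameters are placeholders for a typical run):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./wmt_en_ro \
#     --output_dir ./out --do_train --do_eval --predict_with_generate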
| 71 | 1 |
import re
def _snake_case ( __snake_case ):
    # validate that the strand contains only A/T/C/G, then map each base to its complement (A<->T, C<->G)
    if len(re.findall('''[ATCG]''' , __snake_case ) ) != len(__snake_case ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
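# Examples for the complement function above (complement only; the strand is not
# reversed):
#   "ATCG" -> "TAGC"
#   "GCTA" -> "CGAT"
#   "ATXG" -> raises ValueError("Invalid Strand")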
| 71 | from __future__ import annotations
import typing
from collections import Counter
def _snake_case ( __snake_case ):
    # count, for each perimeter up to max_perimeter, how many integer right triangles have that perimeter
    _UpperCamelCase = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__snake_case , max_perimeter + 1 ):
_UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__snake_case ):
_UpperCamelCase = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _snake_case ( __snake_case = 1000 ):
_UpperCamelCase = pythagorean_triple(__snake_case )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
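# The computation above corresponds to Project Euler problem 39: for
# max_perimeter = 1000 the answer is p = 840, the perimeter shared by the most
# integer right triangles, e.g. (40, 399, 401) with 40**2 + 399**2 == 401**2
# and 40 + 399 + 401 == 840.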
| 71 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = original_name.split('''.''' )[0]
_UpperCamelCase = key.split('''.''' )
_UpperCamelCase = int(key_list[key_list.index(__snake_case ) - 2] )
_UpperCamelCase = int(key_list[key_list.index(__snake_case ) - 1] )
_UpperCamelCase = orig_block_num - offset
_UpperCamelCase = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f"""patch_embeddings.{total_embed_found}.""" )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
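# The URL above is the standard COCO val2017 "two cats" picture that HF
# conversion scripts commonly use as a quick sanity-check input.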
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 | import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**_A )
        return config
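    # The defaults above can be overridden per test via kwargs, e.g.
    # self.get_scheduler_config(prediction_type="v_prediction") as in the
    # v-prediction loop below.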
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
    def UpperCamelCase_ ( self : int ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
    def UpperCamelCase_ ( self : Tuple ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
    def UpperCamelCase_ ( self : int ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
    def UpperCamelCase_ ( self : Dict ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "esm"
def __init__( self : str , _A : Optional[int]=None , _A : Tuple=None , _A : List[str]=None , _A : str=768 , _A : str=12 , _A : Optional[Any]=12 , _A : int=3072 , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Dict=1026 , _A : List[Any]=0.02 , _A : Dict=1e-12 , _A : Optional[Any]="absolute" , _A : Union[str, Any]=True , _A : Optional[Any]=None , _A : str=False , _A : List[Any]=False , _A : Dict=None , _A : Optional[Any]=None , **_A : Tuple , ):
super().__init__(pad_token_id=_A , mask_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = emb_layer_norm_before
_UpperCamelCase = token_dropout
_UpperCamelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
_UpperCamelCase = EsmFoldConfig()
elif isinstance(_A , _A ):
_UpperCamelCase = EsmFoldConfig(**_A )
_UpperCamelCase = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
_UpperCamelCase = get_default_vocab_list()
else:
_UpperCamelCase = vocab_list
else:
_UpperCamelCase = None
_UpperCamelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _A ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = super().to_dict()
if isinstance(self.esmfold_config , _A ):
_UpperCamelCase = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig :
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = 0
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = 128
UpperCAmelCase = None
def UpperCamelCase_ ( self : Dict ):
if self.trunk is None:
_UpperCamelCase = TrunkConfig()
elif isinstance(self.trunk , _A ):
_UpperCamelCase = TrunkConfig(**self.trunk )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = asdict(self )
_UpperCamelCase = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig :
UpperCAmelCase = 48
UpperCAmelCase = 1024
UpperCAmelCase = 128
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 32
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 4
UpperCAmelCase = 128
UpperCAmelCase = None
def UpperCamelCase_ ( self : List[Any] ):
if self.structure_module is None:
_UpperCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , _A ):
_UpperCamelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_UpperCamelCase = self.sequence_state_dim // self.sequence_head_width
_UpperCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = asdict(self )
_UpperCamelCase = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig :
UpperCAmelCase = 384
UpperCAmelCase = 128
UpperCAmelCase = 16
UpperCAmelCase = 128
UpperCAmelCase = 12
UpperCAmelCase = 4
UpperCAmelCase = 8
UpperCAmelCase = 0.1
UpperCAmelCase = 8
UpperCAmelCase = 1
UpperCAmelCase = 2
UpperCAmelCase = 7
UpperCAmelCase = 10
UpperCAmelCase = 1e-8
UpperCAmelCase = 1e5
def UpperCamelCase_ ( self : Dict ):
return asdict(self )
def get_default_vocab_list( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 71 | import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self : Dict ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
_UpperCamelCase = 4
_UpperCamelCase = 32
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = (batch_size, num_channels) + sizes
_UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
_UpperCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
_UpperCamelCase = 128
_UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )
if include_res_hidden_states_tuple:
_UpperCamelCase = torch.manual_seed(1 )
_UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)
if include_encoder_hidden_states:
_UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )
if include_skip_sample:
_UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )
return dummy_input
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_UpperCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
unet_block.to(_A )
unet_block.eval()
with torch.no_grad():
_UpperCamelCase = unet_block(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCamelCase = output[0, -1, -3:, -3:]
_UpperCamelCase = torch.tensor(_A ).to(_A )
assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
model.to(_A )
model.train()
_UpperCamelCase = model(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = randn_tensor(output.shape , device=_A )
_UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
loss.backward()
| 71 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc , x_start , x_end , steps = 100 , ):
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
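# Sanity check: the trapezoidal rule is exact for linear integrands, e.g.
# fnc = lambda x: x over [0, 1] returns exactly 0.5 for any steps >= 1.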
if __name__ == "__main__":
    def f( x ):
        return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 100_000:
print(f'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
| 71 | def _snake_case ( number ):
    if not isinstance(number , int ):
        raise TypeError('''Input value must be an \'int\' type''' )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
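# Example: number = 6 (0b110) is right-shifted three times before reaching 0,
# so the function returns 3, the 1-based position of the highest set bit.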
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        elif name.split('''.''' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
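# Note: the projection reuses the embedding matrix directly (weight tying),
# so no new parameters are introduced for the output layer.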
def create_vocab_dict( dict_path ):
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
        words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
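# Each line of a fairseq dict file is "<token> <count>"; taking the first
# whitespace-separated field recovers the token, and ids 0-3 stay reserved
# for the special tokens above.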
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('''embed_out''' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) , '''w''' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 71 | 1 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow( arr ):
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast( arr ):
    result = []
    for i, outer in enumerate(arr ):
        next_item = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element( arr ):
    arr_size = len(arr )
    stack = []
    result = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
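# The stack-based version runs in amortized O(n): each element is pushed once
# and popped at most once, versus the O(n^2) double loops above.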
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 71 | from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
    def UpperCamelCase_ ( self : List[Any] ):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
    def UpperCamelCase_ ( self : Any ):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(model )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@slow
    def UpperCamelCase_ ( self : int ):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        input_ids = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
| 71 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 71 | def bfs( graph , source , sink , parent ):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
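# Choosing augmenting paths with BFS makes the implementation below the
# Edmonds-Karp variant of Ford-Fulkerson, with O(V * E^2) worst-case time.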
def ford_fulkerson( graph , source , sink ):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
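# The adjacency matrix below is the classic six-node example network (as used
# in CLRS); its maximum flow from node 0 to node 5 is 23.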
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 71 | 1 |
def snake_to_camel_case( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = f"""Expected string as input, found {type(input_str )}"""
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"""Expected boolean as use_pascal parameter, found {type(use_pascal )}"""
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 71 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "xmod"
def __init__( self : Any , _A : str=3_0522 , _A : Union[str, Any]=768 , _A : str=12 , _A : Tuple=12 , _A : Any=3072 , _A : List[Any]="gelu" , _A : Dict=0.1 , _A : int=0.1 , _A : Any=512 , _A : str=2 , _A : str=0.02 , _A : Union[str, Any]=1e-12 , _A : List[str]=1 , _A : List[str]=0 , _A : Tuple=2 , _A : Optional[int]="absolute" , _A : Union[str, Any]=True , _A : Union[str, Any]=None , _A : Optional[int]=False , _A : str=2 , _A : str=False , _A : List[str]=True , _A : int=True , _A : int=("en_XX",) , _A : Tuple=None , **_A : str , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
_UpperCamelCase = pre_norm
_UpperCamelCase = adapter_reduction_factor
_UpperCamelCase = adapter_layer_norm
_UpperCamelCase = adapter_reuse_layer_norm
_UpperCamelCase = ln_before_adapter
_UpperCamelCase = list(_A )
_UpperCamelCase = default_language
class lowerCAmelCase_ ( OnnxConfig ):
@property
def UpperCamelCase_ ( self : int ):
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 71 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
def __init__( self : Optional[int] , _A : Dict , _A : Tuple=7 , _A : Tuple=3 , _A : int=18 , _A : List[str]=30 , _A : Optional[int]=400 , _A : str=True , _A : Dict=None , _A : List[str]=True , ):
_UpperCamelCase = size if size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
UpperCAmelCase = ImageGPTImageProcessor if is_vision_available() else None
    def UpperCamelCase_ ( self : Tuple ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''clusters''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
_UpperCamelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , obj[key] ) )
else:
self.assertEqual(obj[key] , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(_A , '''image_processor.json''' )
image_processor_first.to_json_file(_A )
_UpperCamelCase = self.image_processing_class.from_json_file(_A ).to_dict()
_UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_A )
_UpperCamelCase = self.image_processing_class.from_pretrained(_A ).to_dict()
_UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _A )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def UpperCamelCase_ ( self : int ):
pass
def _snake_case ( ):
_UpperCamelCase = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
_UpperCamelCase = Image.open(dataset[4]['''file'''] )
_UpperCamelCase = Image.open(dataset[5]['''file'''] )
_UpperCamelCase = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
_UpperCamelCase = prepare_images()
# test non-batched
_UpperCamelCase = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
_UpperCamelCase = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _A )
# test batched
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
_UpperCamelCase = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A )
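# The integration test above expects each pixel to be replaced by the id of its
# nearest color cluster, so a 32x32 image becomes 1024 token ids. A minimal sketch of
# that quantization step with a hypothetical two-color palette (the real checkpoint
# ships 512 clusters in normalized [-1, 1] RGB space):
import numpy as np

toy_clusters = np.asarray(
    [
        [0.8866, 0.6618, 0.3891],
        [-0.6042, -0.0229, 0.5423],
    ]
)

def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map each pixel in an (N, 3) array to the index of its nearest cluster."""
    # Squared Euclidean distance from every pixel to every cluster, then argmin.
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(-1)

pixels = np.random.uniform(-1, 1, size=(32 * 32, 3))
input_ids = color_quantize(pixels, toy_clusters)
assert input_ids.shape == (1024,)  # matches the (1, 1024) shape asserted above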
| 71 | def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # halve the exponent with integer division so it stays an int
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
# (a / b) % p matches the modular quotient here only because b divides a exactly
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 71 | 1 |
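# The style snippet above checks Fermat's little theorem: for a prime p and b not
# divisible by p, b ** (p - 2) is the modular inverse of b modulo p. The same check,
# sketched with Python's built-in three-argument pow (also O(log p) modular
# exponentiation), avoids the recursive helper entirely; values mirror those above:
p = 701  # a prime modulus
a = 1_000_000_000
b = 10

inv_b = pow(b, p - 2, p)  # modular inverse of b by Fermat's little theorem
assert (b * inv_b) % p == 1
print((a * inv_b) % p)  # the modular quotient (a / b) mod p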
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCAmelCase = sys.version_info >= (3, 10)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = field(default="toto", metadata={"help": "help message"} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "titi"
UpperCAmelCase = "toto"
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "titi"
UpperCAmelCase = "toto"
UpperCAmelCase = 42
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = "toto"
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BasicEnum(self.foo )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = "toto"
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = MixedTypeEnum(self.foo )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = None
UpperCAmelCase = field(default=__lowercase, metadata={"help": "help message"} )
UpperCAmelCase = None
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[] )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[1, 2, 3] )
UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] )
UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field()
UpperCAmelCase = field()
UpperCAmelCase = field()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BasicEnum(self.required_enum )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = field()
UpperCAmelCase = None
UpperCAmelCase = field(default="toto", metadata={"help": "help message"} )
UpperCAmelCase = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = None
UpperCAmelCase = field(default=__lowercase, metadata={"help": "help message"} )
UpperCAmelCase = None
UpperCAmelCase = list_field(default=[] )
UpperCAmelCase = list_field(default=[] )
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple , _A : argparse.ArgumentParser , _A : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_UpperCamelCase = {k: v for k, v in vars(_A ).items() if k != '''container'''}
_UpperCamelCase = {k: v for k, v in vars(_A ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _A ) and yy.get('''choices''' , _A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_A ) , yy['''type'''](_A ) )
del xx["type"], yy["type"]
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , required=_A )
expected.add_argument('''--bar''' , type=_A , required=_A )
expected.add_argument('''--baz''' , type=_A , required=_A )
expected.add_argument('''--flag''' , type=_A , default=_A , const=_A , nargs='''?''' )
self.argparsersEqual(_A , _A )
_UpperCamelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_UpperCamelCase) , ) = parser.parse_args_into_dataclasses(_A , look_for_args_file=_A )
self.assertFalse(example.flag )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=_A )
expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' )
self.argparsersEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , default=_A , const=_A , nargs='''?''' )
expected.add_argument('''--baz''' , type=_A , default=_A , const=_A , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_A , dest='''baz''' )
expected.add_argument('''--opt''' , type=_A , default=_A )
_UpperCamelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_A )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(_A )
self.argparsersEqual(_A , _A )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
_UpperCamelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_A , Namespace(foo=_A , baz=_A , opt=_A ) )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_A , _A )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
_UpperCamelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCamelCase_ ( self : str ):
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = "toto"
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(_A , _A )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCamelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_A )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_A )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_A )
self.argparsersEqual(_A , _A )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(
_A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCamelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_A , type=_A )
expected.add_argument('''--bar''' , default=_A , type=_A , help='''help message''' )
expected.add_argument('''--baz''' , default=_A , type=_A )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_A )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_A )
_UpperCamelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_A )
for dataclass_type in dataclass_types:
_UpperCamelCase = HfArgumentParser(_A )
self.argparsersEqual(_A , _A )
_UpperCamelCase = parser.parse_args([] )
self.assertEqual(_A , Namespace(foo=_A , bar=_A , baz=_A , ces=[] , des=[] ) )
_UpperCamelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_A , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_A , required=_A )
expected.add_argument('''--required_str''' , type=_A , required=_A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , )
self.argparsersEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_A , required=_A )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_A , )
expected.add_argument('''--opt''' , type=_A , default=_A )
expected.add_argument('''--baz''' , default='''toto''' , type=_A , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_A )
self.argparsersEqual(_A , _A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCamelCase = parser.parse_dict(_A )[0]
_UpperCamelCase = BasicExample(**_A )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(_A , parser.parse_dict , _A , allow_extra_keys=_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(_A , '''temp_json''' )
os.mkdir(_A )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_A , _A )
_UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
_UpperCamelCase = BasicExample(**_A )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = HfArgumentParser(_A )
_UpperCamelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(_A , '''temp_yaml''' )
os.mkdir(_A )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_A , _A )
_UpperCamelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_UpperCamelCase = BasicExample(**_A )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = HfArgumentParser(_A )
self.assertIsNotNone(_A )
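# The tests above drive HfArgumentParser, which turns dataclass fields into argparse
# flags (types, defaults, and help strings carry over). A minimal usage sketch; the
# dataclass and its field names are illustrative, not taken from the tests:
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser

@dataclass
class RunArguments:
    model_name: str = field(metadata={"help": "model identifier to load"})
    learning_rate: float = 5e-5
    seed: Optional[int] = None

parser = HfArgumentParser(RunArguments)
# Each field becomes a --flag; a field without a default becomes a required flag.
(run_args,) = parser.parse_args_into_dataclasses(
    ["--model_name", "bert-base-uncased", "--learning_rate", "3e-5"]
)
print(run_args.model_name, run_args.learning_rate, run_args.seed)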
| 71 | from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = (1 - _cos) / 2
_UpperCamelCase = 1 - _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = (1 + _cos) / 2
_UpperCamelCase = -1 - _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = _sin / 2
_UpperCamelCase = 0
_UpperCamelCase = -ba
_UpperCamelCase = 1 + alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 1 - alpha
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 + alpha
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = 1 + alpha * big_a
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha * big_a
_UpperCamelCase = 1 + alpha / big_a
_UpperCamelCase = -2 * _cos
_UpperCamelCase = 1 - alpha / big_a
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
_UpperCamelCase = big_a * (pmc + aaa)
_UpperCamelCase = 2 * big_a * mpc
_UpperCamelCase = big_a * (pmc - aaa)
_UpperCamelCase = ppmc + aaa
_UpperCamelCase = -2 * pmpc
_UpperCamelCase = ppmc - aaa
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = 1 / sqrt(2 ) , ):
_UpperCamelCase = tau * frequency / samplerate
_UpperCamelCase = sin(__snake_case )
_UpperCamelCase = cos(__snake_case )
_UpperCamelCase = _sin / (2 * q_factor)
_UpperCamelCase = 10 ** (gain_db / 40)
_UpperCamelCase = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase = 2 * sqrt(__snake_case ) * alpha
_UpperCamelCase = big_a * (ppmc + aaa)
_UpperCamelCase = -2 * big_a * pmpc
_UpperCamelCase = big_a * (ppmc - aaa)
_UpperCamelCase = pmc + aaa
_UpperCamelCase = 2 * mpc
_UpperCamelCase = pmc - aaa
_UpperCamelCase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 71 | 1 |
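# The filter designs above follow the Audio EQ Cookbook biquad recipes, but the name
# mangling collapsed the coefficient variables into `aa`/`ba`. A sketch of the
# low-pass case with the conventional names, showing which values would feed
# IIRFilter(2).set_coefficients(a_coeffs, b_coeffs):
from math import cos, sin, sqrt, tau

def make_lowpass_coefficients(frequency, samplerate, q_factor=1 / sqrt(2)):
    """Return ([a0, a1, a2], [b0, b1, b2]) for a second-order low-pass biquad."""
    w0 = tau * frequency / samplerate  # normalized angular frequency (rad/sample)
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    b2 = b0  # the low-pass numerator is symmetric
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    return [a0, a1, a2], [b0, b1, b2]

a_coeffs, b_coeffs = make_lowpass_coefficients(1_000, 48_000)
print(a_coeffs, b_coeffs)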
from math import sqrt
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCamelCase = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCamelCase = False
for divisor in range(2 , int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_UpperCamelCase = False
break
# precondition
assert isinstance(__snake_case , __snake_case ), "'status' must been from type bool"
return status
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCamelCase = list(range(2 , n + 1 ) )
_UpperCamelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 , len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCamelCase = 0
# filters actual prime numbers.
_UpperCamelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
_UpperCamelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and number >= 0, "'number' must been an int and >= 0"
_UpperCamelCase = [] # this list will be returns of the function.
# potential prime number factors.
_UpperCamelCase = 2
_UpperCamelCase = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and >= 0"
_UpperCamelCase = 0
# prime factorization of 'number'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = max(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and >= 0"
_UpperCamelCase = 0
# prime factorization of 'number'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = min(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __snake_case ), "compare must been from type bool"
return number % 2 == 0
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __snake_case ), "compare bust been from type bool"
return number % 2 != 0
def _snake_case ( __snake_case ):
assert (
isinstance(__snake_case , __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
_UpperCamelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCamelCase = get_prime_numbers(__snake_case )
_UpperCamelCase = len(__snake_case )
# run variable for while-loops.
_UpperCamelCase = 0
_UpperCamelCase = None
# exit variable. for break up the loops
_UpperCamelCase = True
while i < len_pn and loop:
_UpperCamelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCamelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _snake_case ( __snake_case , __snake_case ):
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_UpperCamelCase = 0
while numbera != 0:
_UpperCamelCase = numbera % numbera
_UpperCamelCase = numbera
_UpperCamelCase = rest
# precondition
assert isinstance(__snake_case , __snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _snake_case ( __snake_case , __snake_case ):
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_UpperCamelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_UpperCamelCase = prime_factorization(__snake_case )
_UpperCamelCase = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = max(__snake_case , __snake_case )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_UpperCamelCase = prime_fac_a.count(__snake_case )
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case , __snake_case ) ):
ans *= n
else:
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_UpperCamelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'number' must been a positive int"
_UpperCamelCase = 0
_UpperCamelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case , __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def _snake_case ( __snake_case , __snake_case ):
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCamelCase = p_number_a + 1 # jump to the next number
_UpperCamelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n >= 1), "'n' must been int and >= 1"
_UpperCamelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCamelCase = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case ( __snake_case , __snake_case ):
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCamelCase = gcd(abs(__snake_case ) , abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been a int and >= 0"
_UpperCamelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case ( __snake_case ):
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 1 # this will be return
for _ in range(n - 1 ):
_UpperCamelCase = ans
ans += fiba
_UpperCamelCase = tmp
return ans
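# The sieve above strikes out multiples with a quadratic double loop over a list. The
# classic boolean-array Sieve of Eratosthenes does the same job in O(n log log n); a
# minimal sketch:
def sieve_of_eratosthenes(n):
    """Return all primes <= n."""
    if n < 2:
        return []
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            # start at p*p: smaller multiples were already struck by smaller primes
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_p in enumerate(flags) if is_p]

assert sieve_of_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]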
| 71 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
_UpperCamelCase = self.rope_scaling.get('''type''' , _A )
_UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 71 | 1 |
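# A config dict that satisfies `_rope_scaling_validation` above needs exactly the two
# keys the validator reads: `type` (one of "linear" or "dynamic") and `factor` (a
# float strictly greater than 1). A minimal sketch; the factor value is an arbitrary
# illustration:
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)
# {"type": "linear", "factor": 0.5} or a missing key would raise ValueError above.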
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = WavaVecaPhonemeCTCTokenizer
UpperCAmelCase = False
def UpperCamelCase_ ( self : str ):
super().setUp()
_UpperCamelCase = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
_UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) )
_UpperCamelCase = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any]=False , _A : List[str]=20 , _A : Optional[Any]=5 ):
_UpperCamelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_A )) for i in range(len(_A ) )]
_UpperCamelCase = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
_UpperCamelCase = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
_UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
_UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
_UpperCamelCase = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
_UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
_UpperCamelCase = ''' ''' + output_txt
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def UpperCamelCase_ ( self : Dict , **_A : int ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
_UpperCamelCase = tokenizer('''m xxx ɪ''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
_UpperCamelCase = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_UpperCamelCase = tokenizer('''maɪ c''' , do_phonemize=_A ).input_ids
self.assertEqual(_A , [3, 200] ) # mai should be <unk> (=3)
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
_UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_UpperCamelCase = tokenizer.decode(sample_ids[0] )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(_A , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
_UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_UpperCamelCase = tokenizer.decode(sample_ids[0] )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
_UpperCamelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_A )
_UpperCamelCase = tokenizer.batch_decode(_A , filter_word_delimiter_token=_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
_UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer.phonemize(_A , phonemizer_lang='''en-us''' )
_UpperCamelCase = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_A )
_UpperCamelCase = '''Hello how are you'''
_UpperCamelCase = tokenizer(_A , phonemizer_lang='''en-us''' ).input_ids
_UpperCamelCase = tokenizer(_A , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(_A , _A )
_UpperCamelCase = tokenizer.decode(_A )
_UpperCamelCase = tokenizer.decode(_A )
self.assertEqual(_A , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(_A , '''ɛ l o h aʊ a ʁ j u''' )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
_UpperCamelCase = '''Hello how Are you'''
_UpperCamelCase = '''hello how are you'''
_UpperCamelCase = tokenizer(_A ).input_ids
_UpperCamelCase = tokenizer(_A ).input_ids
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
_UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertEqual(_A , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def get_from_offsets( offsets , key ):
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
_UpperCamelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_UpperCamelCase = tokenizer.decode(_A , output_char_offsets=_A , filter_word_delimiter_token=_A )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(_A : List[str] , _A : List[str] ):
self.assertTrue(isinstance(_A , _A ) )
self.assertTrue(isinstance(outputs_list[0] , _A ) )
# transform list to ModelOutput
_UpperCamelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(_A : Tuple , _A : Any ):
if isinstance(_A , _A ):
[recursive_check(_A , _A ) for la, la in zip(_A , _A )]
self.assertEqual(_A , _A )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
_UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_UpperCamelCase = tokenizer.batch_decode(_A , output_char_offsets=_A )
_UpperCamelCase = [tokenizer.decode(_A , output_char_offsets=_A ) for ids in sample_ids]
check_list_tuples_equal(_A , _A )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def UpperCamelCase_ ( self : str ):
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def UpperCamelCase_ ( self : Dict ):
pass
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(_A )
self.assertNotEqual(_A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_UpperCamelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
_UpperCamelCase = tokenizer.add_tokens(_A )
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size + len(_A ) )
_UpperCamelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_UpperCamelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
_UpperCamelCase = tokenizer.add_special_tokens(_A )
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size_a + len(_A ) )
_UpperCamelCase = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def UpperCamelCase_ ( self : str ):
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : Any ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_UpperCamelCase = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_UpperCamelCase = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
_UpperCamelCase = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(output['''text'''] , _A )
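# The offset tests above return per-character start/end indices measured in CTC
# frames. A sketch of converting such offsets to timestamps, assuming a hypothetical
# 20 ms per frame (the real stride depends on the acoustic model, not the tokenizer):
char_offsets = [
    {"char": "h", "start_offset": 0, "end_offset": 2},
    {"char": "ə", "start_offset": 2, "end_offset": 5},
    {"char": "l", "start_offset": 5, "end_offset": 7},
]  # illustrative decoder output shaped like the `char_offsets` checked above

TIME_PER_FRAME = 0.02  # assumed seconds per frame

for item in char_offsets:
    start = item["start_offset"] * TIME_PER_FRAME
    end = item["end_offset"] * TIME_PER_FRAME
    print(f"{item['char']}: {start:.2f}s - {end:.2f}s")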
| 71 | from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__lowercase ):
UpperCAmelCase = ["keras_nlp"]
def __init__( self : Any , *_A : Dict , **_A : List[str] ):
requires_backends(self , ['''keras_nlp'''] )
| 71 | 1 |
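# The snippet above is transformers' dummy-object pattern: when an optional backend is
# missing, the real class is replaced by a stub whose construction raises a helpful
# ImportError. A standalone sketch of the same idea (class name is illustrative):
class _RequiresKerasNLP:
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        # requires_backends(self, ["keras_nlp"]) plays this role in transformers
        raise ImportError(
            f"{type(self).__name__} requires the `keras_nlp` backend; "
            "install it with `pip install keras-nlp`."
        )

try:
    _RequiresKerasNLP()
except ImportError as err:
    print(err)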
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
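# The assertion above rescales the mean label cross-entropy back into a summed
# sequence log-likelihood: model(...).loss averages over the label tokens, so
# multiplying by the label length undoes the mean. A sketch of the arithmetic with
# made-up numbers (not the real MT5 output):
num_label_tokens = 7
mean_loss = 12.13
sequence_log_likelihood = -(num_label_tokens * mean_loss)
print(sequence_log_likelihood)  # on the scale of EXPECTED_SCORE (-84.9127) above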
| 71 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
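        # Pair consecutive hidden sizes so stage i+1 maps hidden_sizes[i] -> hidden_sizes[i + 1].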
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
        # Change to NCHW output format to have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
# classification head
_UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier[0](_A )
_UpperCamelCase = self.classifier[1](_A )
_UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 71 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "longformer"
def __init__( self : Dict , _A : Union[List[int], int] = 512 , _A : int = 2 , _A : int = 1 , _A : int = 0 , _A : int = 2 , _A : int = 3_0522 , _A : int = 768 , _A : int = 12 , _A : int = 12 , _A : int = 3072 , _A : str = "gelu" , _A : float = 0.1 , _A : float = 0.1 , _A : int = 512 , _A : int = 2 , _A : float = 0.02 , _A : float = 1e-12 , _A : bool = False , **_A : str , ):
super().__init__(pad_token_id=_A , **_A )
_UpperCamelCase = attention_window
_UpperCamelCase = sep_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = onnx_export
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Optional[Any] , _A : "PretrainedConfig" , _A : str = "default" , _A : "List[PatchingSpec]" = None ):
super().__init__(_A , _A , _A )
_UpperCamelCase = True
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = super().outputs
if self.task == "default":
_UpperCamelCase = {0: '''batch'''}
return outputs
@property
def UpperCamelCase_ ( self : int ):
return 1e-4
@property
def UpperCamelCase_ ( self : Dict ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def UpperCamelCase_ ( self : Optional[Any] , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
_UpperCamelCase = super().generate_dummy_inputs(
preprocessor=_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_UpperCamelCase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
_UpperCamelCase = 1
return inputs
| 71 | from sklearn.metrics import mean_squared_error
import datasets
_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def UpperCamelCase_ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ):
_UpperCamelCase = mean_squared_error(
_A , _A , sample_weight=_A , multioutput=_A , squared=_A )
return {"mse": mse}
| 71 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCAmelCase = 16
_lowerCAmelCase = 32
def _snake_case ( __snake_case , __snake_case = 16 ):
_UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_UpperCamelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCamelCase = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCamelCase = 8
else:
_UpperCamelCase = None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
_UpperCamelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=__snake_case )
_UpperCamelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def _snake_case ( __snake_case , __snake_case ):
# Initialize accelerator
_UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase = config['''lr''']
_UpperCamelCase = int(config['''num_epochs'''] )
_UpperCamelCase = int(config['''seed'''] )
_UpperCamelCase = int(config['''batch_size'''] )
_UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
_UpperCamelCase = MAX_GPU_BATCH_SIZE
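        # e.g. batch_size=64 with MAX_GPU_BATCH_SIZE=16 runs micro-batches of 16 and accumulates gradients over 4 steps.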
set_seed(__snake_case )
_UpperCamelCase , _UpperCamelCase = get_dataloaders(__snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
_UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCamelCase = model(**__snake_case )
_UpperCamelCase = outputs.loss
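            # Scale the loss so the gradients accumulated over `gradient_accumulation_steps` micro-batches
            # match a single large-batch step.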
_UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase = model(**__snake_case )
_UpperCamelCase = outputs.logits.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
_UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __snake_case )
def _snake_case ( ):
_UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 71 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
                self.assertEqual(f.read() , _A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 71 | 1 |
from numpy import exp, pi, sqrt
def _snake_case ( x , mu = 0.0 , sigma = 1.0 ):
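    """
    Gaussian (normal) probability density at ``x`` for mean ``mu`` and standard deviation ``sigma``.

    >>> float(_snake_case(0))
    0.3989422804014327
    """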
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | from __future__ import annotations
import math
class lowerCAmelCase_ :
def __init__( self : int , _A : int ):
_UpperCamelCase = size
# approximate the overall size of segment tree with given value
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
_UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
def UpperCamelCase_ ( self : str , _A : int ):
return idx * 2
def UpperCamelCase_ ( self : Any , _A : int ):
return idx * 2 + 1
def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ):
if left_element == right_element:
_UpperCamelCase = a[left_element - 1]
else:
_UpperCamelCase = (left_element + right_element) // 2
self.build(self.left(_A ) , _A , _A , _A )
self.build(self.right(_A ) , mid + 1 , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ):
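        # Lazy propagation: apply any pending range assignment stored at this node and push it
        # down to the children before recursing.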
if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
_UpperCamelCase = val
if left_element != right_element:
_UpperCamelCase = val
_UpperCamelCase = val
_UpperCamelCase = True
_UpperCamelCase = True
return True
_UpperCamelCase = (left_element + right_element) // 2
self.update(self.left(_A ) , _A , _A , _A , _A , _A )
self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
return True
def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ):
if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
_UpperCamelCase = (left_element + right_element) // 2
_UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A )
_UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A )
return max(_A , _A )
def __str__( self : Tuple ):
return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_lowerCAmelCase = 15
_lowerCAmelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 71 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "gpt_neox"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=5_0432 , _A : List[Any]=6144 , _A : int=44 , _A : int=64 , _A : Optional[Any]=2_4576 , _A : Any="gelu" , _A : Tuple=0.25 , _A : Union[str, Any]=1_0000 , _A : Tuple=0.0 , _A : Any=0.0 , _A : int=0.1 , _A : List[str]=2048 , _A : Dict=0.02 , _A : Optional[Any]=1e-5 , _A : Tuple=True , _A : List[Any]=0 , _A : Optional[int]=2 , _A : Optional[int]=False , _A : List[Any]=True , _A : Any=None , **_A : Any , ):
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
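        # When set, `rope_scaling` is expected to look like {"type": "linear", "factor": 2.0}; validated below.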
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def UpperCamelCase_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _A ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
_UpperCamelCase = self.rope_scaling.get('''type''' , _A )
_UpperCamelCase = self.rope_scaling.get('''factor''' , _A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_A , _A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 71 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = field
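        # `field`, if given, selects the top-level JSON key under which the records to load are nested.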
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = Json(
cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCamelCase = num_proc
_UpperCamelCase = '''utf-8'''
_UpperCamelCase = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
_UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
_UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
_UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
_UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args
_UpperCamelCase = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A )
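        # JSON Lines output: make sure every encoded batch ends with a newline so records stay one per line.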
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ):
_UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_A )
else:
_UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_A )
return written
| 71 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
_lowerCAmelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _snake_case ( __snake_case , __snake_case ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def _snake_case ( __snake_case ):
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=__snake_case )
def _snake_case ( __snake_case , __snake_case ):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
_UpperCamelCase = tmp_path_factory.getbasetemp() / '''cache'''
_UpperCamelCase = test_hf_cache_home / '''datasets'''
_UpperCamelCase = test_hf_cache_home / '''metrics'''
_UpperCamelCase = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(__snake_case ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(__snake_case ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(__snake_case ) )
_UpperCamelCase = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(__snake_case ) )
_UpperCamelCase = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__snake_case ) )
@pytest.fixture(autouse=__snake_case , scope='''session''' )
def _snake_case ( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=__snake_case )
def _snake_case ( __snake_case ):
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , __snake_case )
@pytest.fixture
def _snake_case ( __snake_case ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , __snake_case )
| 71 | import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ ( enum.Enum ):
UpperCAmelCase = 0
UpperCAmelCase = 1
UpperCAmelCase = 2
@add_end_docstrings(__lowercase )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *_A : List[str] , **_A : str ):
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase = None
if self.model.config.prefix is not None:
_UpperCamelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=_A , **self._forward_params )
_UpperCamelCase = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Dict , _A : Optional[int]=None , _A : Any=None , _A : Optional[int]=None , _A : List[str]=None , _A : List[Any]=None , _A : int=None , _A : Tuple=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
_UpperCamelCase = {}
if prefix is not None:
_UpperCamelCase = prefix
if prefix:
_UpperCamelCase = self.tokenizer(
_A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_UpperCamelCase = handle_long_generation
preprocess_params.update(_A )
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_UpperCamelCase = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : int , *_A : Union[str, Any] , **_A : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_A , **_A )
def __call__( self : List[str] , _A : str , **_A : Any ):
return super().__call__(_A , **_A )
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : int="" , _A : Optional[Any]=None , **_A : Optional[Any] ):
_UpperCamelCase = self.tokenizer(
prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
_UpperCamelCase = prompt_text
if handle_long_generation == "hole":
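            # "hole" strategy: truncate the prompt from the left so the kept tokens plus the
            # requested new tokens still fit within the model's maximum length.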
_UpperCamelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase = generate_kwargs['''max_new_tokens''']
else:
_UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
                        ''' model's max length''' )
_UpperCamelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , **_A : str ):
_UpperCamelCase = model_inputs['''input_ids''']
_UpperCamelCase = model_inputs.get('''attention_mask''' , _A )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = 1
else:
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_UpperCamelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
_UpperCamelCase = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[str] , _A : Dict , _A : Optional[Any]=ReturnType.FULL_TEXT , _A : Dict=True ):
_UpperCamelCase = model_outputs['''generated_sequence'''][0]
_UpperCamelCase = model_outputs['''input_ids''']
_UpperCamelCase = model_outputs['''prompt_text''']
_UpperCamelCase = generated_sequence.numpy().tolist()
_UpperCamelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase = self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase = 0
else:
_UpperCamelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase = prompt_text + text[prompt_length:]
else:
_UpperCamelCase = text[prompt_length:]
_UpperCamelCase = {'''generated_text''': all_text}
records.append(_A )
return records
| 71 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , *_A : Union[str, Any] , **_A : Optional[int] ):
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , _A , )
super().__init__(*_A , **_A )
| 71 | import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A )
_UpperCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
_UpperCamelCase = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 71 | 1 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = PhobertTokenizer
UpperCAmelCase = False
def UpperCamelCase_ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
_UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) )
_UpperCamelCase = ['''#version: 0.2''', '''l à</w>''']
_UpperCamelCase = {'''unk_token''': '''<unk>'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def UpperCamelCase_ ( self : Optional[Any] , **_A : str ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : str ):
_UpperCamelCase = '''Tôi là VinAI Research'''
_UpperCamelCase = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase = '''Tôi là VinAI Research'''
_UpperCamelCase = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
_UpperCamelCase = tokenizer.tokenize(_A )
print(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
| 71 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the encoder."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCAmelCase = field(
default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
UpperCAmelCase = field(
default=1024, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
}, )
UpperCAmelCase = field(
default=142, metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
UpperCAmelCase = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Source language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Target language id for translation."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "# num_beams to use for evaluation."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(__snake_case , os.path.join(__snake_case , f"""{split}_results.json""" ) )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__snake_case , __snake_case , __snake_case ):
assert hasattr(__snake_case , __snake_case ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__snake_case , __snake_case ):
_UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCamelCase = SeqaSeqDataset
# Get datasets
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCamelCase = (
build_compute_metrics_fn(data_args.task , __snake_case ) if training_args.predict_with_generate else None
)
_UpperCamelCase = SeqaSeqTrainer(
model=__snake_case , args=__snake_case , data_args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , data_collator=SeqaSeqDataCollator(
__snake_case , __snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__snake_case , tokenizer=__snake_case , )
_UpperCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_UpperCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
_UpperCamelCase = data_args.n_val
_UpperCamelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCamelCase = trainer.predict(test_dataset=__snake_case , metric_key_prefix='''test''' )
_UpperCamelCase = test_output.metrics
_UpperCamelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCamelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __snake_case , training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.predict_with_generate:
_UpperCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
_UpperCamelCase = lmap(str.strip , __snake_case )
write_txt_file(__snake_case , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__snake_case , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
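    # A hedged usage sketch, not part of the script above: the flag names assume the
    # conventional field names from the upstream seq2seq example (the obfuscated
    # dataclass fields no longer show them), and all paths/values are illustrative.
    #
    #   python finetune_trainer.py \
    #       --model_name_or_path facebook/bart-base \
    #       --data_dir ./my_seq2seq_data \
    #       --task summarization \
    #       --output_dir ./output \
    #       --do_train --do_eval \
    #       --predict_with_generate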
| 71 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
_UpperCamelCase = os.path.abspath(__snake_case )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
_UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
_UpperCamelCase = convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_UpperCamelCase = convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
return flax_state_dict
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , ):
def is_key_or_prefix_key_in_dict(__snake_case ) -> bool:
return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_UpperCamelCase = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_UpperCamelCase = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_UpperCamelCase = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_UpperCamelCase = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_UpperCamelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
_UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_UpperCamelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
_UpperCamelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_UpperCamelCase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_UpperCamelCase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_UpperCamelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_UpperCamelCase = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_UpperCamelCase = pt_tuple_key[-2] + '''_v'''
if name is not None:
_UpperCamelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _snake_case ( __snake_case , __snake_case ):
# convert pytorch tensor to numpy
_UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_UpperCamelCase = flax_model.params['''params''']
else:
_UpperCamelCase = flax_model.params
_UpperCamelCase = flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCamelCase = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
_UpperCamelCase = {}
_UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
_UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCamelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
_UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCamelCase , _UpperCamelCase = rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
_UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def _snake_case ( __snake_case , __snake_case ):
import torch
# Load the index
_UpperCamelCase = {}
for shard_file in shard_filenames:
# load using msgpack utils
_UpperCamelCase = torch.load(__snake_case )
_UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCamelCase = flax_model.params['''params''']
_UpperCamelCase = flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
_UpperCamelCase = flax_model.params
_UpperCamelCase = flatten_dict(__snake_case )
_UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
_UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCamelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
_UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCamelCase , _UpperCamelCase = rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
_UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = os.path.abspath(__snake_case )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
_UpperCamelCase = getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__snake_case , '''rb''' ) as state_f:
try:
_UpperCamelCase = from_bytes(__snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def _snake_case ( __snake_case , __snake_case ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    _UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __snake_case ) ).values()
if any(__snake_case ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
_UpperCamelCase = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
_UpperCamelCase = flatten_dict(__snake_case )
_UpperCamelCase = pt_model.state_dict()
_UpperCamelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
_UpperCamelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_UpperCamelCase = []
_UpperCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_UpperCamelCase = flax_key_tuple[0] == pt_model.base_model_prefix
_UpperCamelCase = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCamelCase = jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCamelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
_UpperCamelCase = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_UpperCamelCase = '''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_UpperCamelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_UpperCamelCase = key.split('''.''' )
_UpperCamelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_UpperCamelCase = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
_UpperCamelCase = key_components[-2] + '''_v'''
if name is not None:
_UpperCamelCase = key_components[:-3] + [name]
_UpperCamelCase = '''.'''.join(__snake_case )
_UpperCamelCase = key
if flax_key in special_pt_names:
_UpperCamelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
_UpperCamelCase = np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
_UpperCamelCase = torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
_UpperCamelCase = list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__snake_case ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
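# A small self-contained sketch of the two layout conventions the converters above
# depend on: Flax stores a linear kernel as the transpose of the PyTorch weight,
# and a conv kernel as (H, W, in, out) instead of PyTorch's (out, in, H, W).
import numpy as np

pt_linear = np.zeros((8, 4))               # PyTorch linear weight: (out, in)
flax_linear = pt_linear.T                  # Flax kernel: (in, out)
assert flax_linear.shape == (4, 8)

pt_conv = np.zeros((16, 3, 5, 5))          # PyTorch conv weight: (out, in, H, W)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # Flax kernel: (H, W, in, out)
assert flax_conv.shape == (5, 5, 3, 16)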
| 71 | from __future__ import annotations
import typing
from collections import Counter
def _snake_case ( __snake_case ):
_UpperCamelCase = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__snake_case , max_perimeter + 1 ):
_UpperCamelCase = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__snake_case ):
_UpperCamelCase = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _snake_case ( __snake_case = 1000 ):
_UpperCamelCase = pythagorean_triple(__snake_case )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
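    # A self-contained sanity check (not part of the original solution): perimeter 120
    # admits exactly three integer right triangles, (20, 48, 52), (24, 45, 51) and
    # (30, 40, 50), so the counter built above should record triplets[120] == 3.
    counts = Counter()
    for a in range(1, 120):
        for b in range(a, 120):
            c = 120 - a - b
            if c > b and a * a + b * b == c * c:
                counts[120] += 1
    assert counts[120] == 3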
| 71 | 1 |
from __future__ import annotations
import requests
_lowerCAmelCase = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def _snake_case ( __snake_case , __snake_case = 1 , __snake_case = "new" , __snake_case = None ):
_UpperCamelCase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__snake_case ) - valid_terms ) ):
_UpperCamelCase = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__snake_case )
_UpperCamelCase = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 429:
raise requests.HTTPError
_UpperCamelCase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__snake_case )}
_UpperCamelCase = {}
for id_ in range(__snake_case ):
_UpperCamelCase = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
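    # A hedged sketch, not part of the original module, of a retry-with-backoff
    # wrapper for the 429 case raised above; the delays are illustrative, not
    # values mandated by the Reddit API.
    import time

    def get_with_backoff(fetch, retries=3, base_delay=2.0):
        """Call `fetch`, retrying with exponential backoff on requests.HTTPError."""
        for attempt in range(retries):
            try:
                return fetch()
            except requests.HTTPError:
                time.sleep(base_delay * (2**attempt))  # 2s, 4s, 8s, ...
        raise requests.HTTPError("still rate limited after retries")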
| 71 | import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 1 |
def _snake_case ( __snake_case ):
try:
_UpperCamelCase = float(__snake_case )
except ValueError:
raise ValueError('''Please enter a valid number''' )
_UpperCamelCase = decimal - int(__snake_case )
if fractional_part == 0:
return int(__snake_case ), 1
else:
_UpperCamelCase = len(str(__snake_case ).split('''.''' )[1] )
_UpperCamelCase = int(decimal * (10**number_of_frac_digits) )
_UpperCamelCase = 10**number_of_frac_digits
_UpperCamelCase , _UpperCamelCase = denominator, numerator
while True:
_UpperCamelCase = dividend % divisor
if remainder == 0:
break
_UpperCamelCase , _UpperCamelCase = divisor, remainder
_UpperCamelCase , _UpperCamelCase = numerator / divisor, denominator / divisor
return int(__snake_case ), int(__snake_case )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
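    # A cross-check, not in the original file: the standard library's
    # fractions.Fraction performs the same reduction as the hand-rolled Euclidean
    # loop above, so the two should agree on well-formed inputs.
    from fractions import Fraction

    f = Fraction("6.25")
    assert (f.numerator, f.denominator) == (25, 4)  # matches decimal_to_fraction("6.25")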
| 71 | import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self : Dict ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
_UpperCamelCase = 4
_UpperCamelCase = 32
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = (batch_size, num_channels) + sizes
_UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
_UpperCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
_UpperCamelCase = 128
_UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )
if include_res_hidden_states_tuple:
_UpperCamelCase = torch.manual_seed(1 )
_UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)
if include_encoder_hidden_states:
_UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )
if include_skip_sample:
_UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )
return dummy_input
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_UpperCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
unet_block.to(_A )
unet_block.eval()
with torch.no_grad():
_UpperCamelCase = unet_block(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCamelCase = output[0, -1, -3:, -3:]
_UpperCamelCase = torch.tensor(_A ).to(_A )
assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
model.to(_A )
model.train()
_UpperCamelCase = model(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = randn_tensor(output.shape , device=_A )
_UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
loss.backward()
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
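# A minimal sketch of the lazy-import idea behind _LazyModule, separate from the
# real transformers implementation: PEP 562 lets a package __init__ defer heavy
# submodule imports until an attribute is first touched. The mapping below is
# illustrative, and this only works when placed inside a package's __init__.py.
import importlib

_SUBMODULES = {"FNetModel": "modeling_fnet"}  # attribute name -> submodule

def __getattr__(name):
    if name in _SUBMODULES:
        module = importlib.import_module("." + _SUBMODULES[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")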
| 71 | def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''Input value must be an \'int\' type''' )
_UpperCamelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
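    # An editorial cross-check, not in the original file: the shift loop above
    # computes the position of the most significant set bit, which for non-negative
    # ints equals the built-in int.bit_length(); negative inputs would loop forever,
    # since Python's arithmetic right shift never drives a negative number to zero.
    assert (25).bit_length() == 5  # 25 = 0b11001 -> the loop above also yields 5
    assert (0).bit_length() == 0   # and both agree that 0 has no set bits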
| 71 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : str ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = '''A red cat sitting on a park bench'''
_UpperCamelCase = np.random.RandomState(0 )
_UpperCamelCase = pipe(
prompt=_A , image=_A , mask_image=_A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_A , output_type='''np''' , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 71 | import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_UpperCamelCase = None
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
elif name.split('''.''' )[0] == "proj":
_UpperCamelCase = fairseq_model.proj
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
def _snake_case ( __snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.split(''' ''' )[0] for line in lines]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__snake_case , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase = WavaVecaConfig.from_pretrained(__snake_case )
_UpperCamelCase = SpeechaTextaConfig.from_pretrained(
__snake_case , vocab_size=__snake_case , decoder_layers=__snake_case , do_stable_layer_norm=__snake_case )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
_UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __snake_case )
_UpperCamelCase = SpeechaTextaForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
_UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case )
_UpperCamelCase = False
# add projection layer
_UpperCamelCase = nn.Parameter(projection_layer.weight )
_UpperCamelCase = nn.Parameter(projection_layer.bias )
_UpperCamelCase = create_vocab_dict(__snake_case )
with open(os.path.join(__snake_case , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__snake_case , __snake_case )
_UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__snake_case , '''vocab.json''' ) )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''speech_to_text_2'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
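    # A self-contained illustration of the embedding-to-linear trick used by the
    # helper above that builds the decoder's output projection from an embedding
    # table; the tiny shapes here are chosen so the weight matrices line up exactly.
    emb = nn.Embedding(10, 4)               # vocab_size=10, hidden=4
    lm_head = nn.Linear(4, 10, bias=False)  # hidden -> vocab logits, weight: (10, 4)
    lm_head.weight.data = emb.weight.data   # reuse the embedding values as the weight
    hidden = torch.randn(2, 4)
    assert lm_head(hidden).shape == (2, 10)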
| 71 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = "Hello world! cécé herlolip"
_lowerCAmelCase = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=__snake_case , large=__snake_case , share_emb=__snake_case , use_bert_emb=__snake_case , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    _UpperCamelCase = torch.load(__snake_case , lambda storage , loc : storage )
_UpperCamelCase = AbsSummarizer(__snake_case , torch.device('''cpu''' ) , __snake_case )
original.eval()
_UpperCamelCase = BertAbsSummarizer(__snake_case , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
_UpperCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
_UpperCamelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__snake_case )) )
_UpperCamelCase = torch.tensor(__snake_case ).unsqueeze(0 )
_UpperCamelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__snake_case )) )
_UpperCamelCase = torch.tensor(__snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_UpperCamelCase = encoder_input_ids
_UpperCamelCase = decoder_input_ids
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = None
    # The original model does not apply the generator layer immediately, but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
_UpperCamelCase = original(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )[0]
_UpperCamelCase = original.generator(__snake_case )
_UpperCamelCase = new_model(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )[0]
_UpperCamelCase = new_model.generator(__snake_case )
_UpperCamelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(__snake_case ) )
_UpperCamelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(__snake_case ) )
_UpperCamelCase = torch.allclose(__snake_case , __snake_case , atol=1E-3 )
if are_identical:
        logging.info('''all outputs are equal up to 1e-3''' )
else:
        raise ValueError('''the outputs are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
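    # A brief illustration, not part of the conversion script, of why the code above
    # re-saves the state_dict: torch.save(model) pickles the class by import path and
    # breaks when the source tree moves, whereas a state_dict is just named tensors.
    import os
    import tempfile

    from torch import nn

    model = nn.Linear(3, 2)
    path = os.path.join(tempfile.gettempdir(), "weights.pt")
    torch.save(model.state_dict(), path)        # portable: tensors keyed by name
    restored = nn.Linear(3, 2)                  # rebuild the architecture in code...
    restored.load_state_dict(torch.load(path))  # ...then load only the tensors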
| 71 | from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[str]=13 , _A : Union[str, Any]=7 , _A : int=True , _A : Optional[int]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[Any]=4 , _A : Optional[Any]=37 , _A : int="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : str=False , _A : int=True , _A : Any="None" , _A : Dict=3 , _A : List[Any]=4 , _A : Optional[Any]=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : int , _A : Optional[Any] ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , _A : Optional[int] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] , _A : List[str] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Dict , _A : Dict , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Any , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Any , _A : List[str] , _A : str , _A : Optional[int] , _A : str ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.prepare_config_and_inputs()
( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = TFDebertaVaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_A )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
_UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(_A , attention_mask=_A )[0]
_UpperCamelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
| 71 | 1 |
from typing import Any
class lowerCAmelCase_ :
def __init__( self : int , _A : Any ):
_UpperCamelCase = data
_UpperCamelCase = None
class lowerCAmelCase_ :
def __init__( self : List[str] ):
_UpperCamelCase = None
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_UpperCamelCase = temp.next
print()
def UpperCamelCase_ ( self : List[Any] , _A : Any ):
_UpperCamelCase = Node(_A )
_UpperCamelCase = self.head
_UpperCamelCase = new_node
def UpperCamelCase_ ( self : Optional[Any] , node_data_a : Any , node_data_b : Any ):
if node_data_a == node_data_b:
return
else:
node_a = self.head
while node_a is not None and node_a.data != node_data_a:
node_a = node_a.next
node_b = self.head
while node_b is not None and node_b.data != node_data_b:
node_b = node_b.next
if node_a is None or node_b is None:
return
node_a.data , node_b.data = node_b.data , node_a.data
if __name__ == "__main__":
_lowerCAmelCase = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 71 | def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
# Return True if the sink is reachable from the source in the residual graph.
_UpperCamelCase = [False] * len(__snake_case )
_UpperCamelCase = []
queue.append(__snake_case )
_UpperCamelCase = True
while queue:
_UpperCamelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__snake_case )
_UpperCamelCase = True
_UpperCamelCase = u
return visited[t]
def _snake_case ( __snake_case , __snake_case , __snake_case ):
# This array is filled by BFS to store the augmenting path
_UpperCamelCase = [-1] * (len(__snake_case ))
_UpperCamelCase = 0
while bfs(__snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = float('''Inf''' )
_UpperCamelCase = sink
while s != source:
# Find the minimum residual capacity along the selected path
_UpperCamelCase = min(__snake_case , graph[parent[s]][s] )
_UpperCamelCase = parent[s]
max_flow += path_flow
_UpperCamelCase = sink
while v != source:
_UpperCamelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase = parent[v]
return max_flow
_lowerCAmelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_lowerCAmelCase, _lowerCAmelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
| 71 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _snake_case ( __snake_case ):
_UpperCamelCase = filter(lambda __snake_case : p.requires_grad , model.parameters() )
_UpperCamelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
if metric == "rouge2":
_UpperCamelCase = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_UpperCamelCase = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_UpperCamelCase = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_UpperCamelCase = ModelCheckpoint(
dirpath=__snake_case , filename=__snake_case , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def _snake_case ( __snake_case , __snake_case ):
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=__snake_case , verbose=__snake_case , )
class lowerCAmelCase_ ( pl.Callback ):
def UpperCamelCase_ ( self : int , _A : Optional[int] , _A : Dict ):
_UpperCamelCase = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def UpperCamelCase_ ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Tuple=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCamelCase = od / '''test_results.txt'''
_UpperCamelCase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCamelCase = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_UpperCamelCase = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , '''a+''' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCamelCase = metrics[key]
if isinstance(_A , torch.Tensor ):
_UpperCamelCase = val.item()
_UpperCamelCase = F"""{key}: {val:.6f}\n"""
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
_UpperCamelCase = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_A )
@rank_zero_only
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Tuple ):
try:
_UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCamelCase = pl_module.model.num_parameters()
_UpperCamelCase = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase_ ( self : str , _A : pl.Trainer , _A : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , '''test''' )
@rank_zero_only
def UpperCamelCase_ ( self : Optional[Any] , _A : pl.Trainer , _A : str ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 71 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : NestedDataStructureLike[PathLike] , _A : Optional[NamedSplit] = None , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[str] = None , _A : Optional[int] = None , **_A : str , ):
super().__init__(
_A , split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , )
_UpperCamelCase = field
_UpperCamelCase = path_or_paths if isinstance(_A , _A ) else {self.split: path_or_paths}
_UpperCamelCase = Json(
cache_dir=_A , data_files=_A , features=_A , field=_A , **_A , )
def UpperCamelCase_ ( self : List[str] ):
# Build iterable dataset
if self.streaming:
_UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , )
_UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : Dataset , _A : Union[PathLike, BinaryIO] , _A : Optional[int] = None , _A : Optional[int] = None , **_A : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCamelCase = dataset
_UpperCamelCase = path_or_buf
_UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCamelCase = num_proc
_UpperCamelCase = '''utf-8'''
_UpperCamelCase = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.to_json_kwargs.pop('''path_or_buf''' , _A )
_UpperCamelCase = self.to_json_kwargs.pop('''orient''' , '''records''' )
_UpperCamelCase = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
_UpperCamelCase = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
_UpperCamelCase = self.to_json_kwargs.pop('''compression''' , _A )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_A ) as buffer:
_UpperCamelCase = self._write(file_obj=_A , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
_UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=_A , lines=_A , index=_A , **self.to_json_kwargs )
return written
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = args
_UpperCamelCase = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=_A , orient=_A , lines=_A , index=_A , **_A )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase_ ( self : int , _A : BinaryIO , _A : Dict , _A : Optional[Any] , _A : Dict , **_A : str , ):
_UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
_UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_A )
else:
_UpperCamelCase , _UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_A )
return written
| 71 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 | 1 |