code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 187 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
A = logging.get_logger(__name__)
A = 'T5Config'
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """mt5"""
__A = MTaConfig
| 187 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.convolution(self.padding(lowercase ) )
A_ : List[str] = self.normalization(lowercase )
A_ : List[Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = config.num_channels
A_ : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Optional[int] = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.pooler(lowercase )
for layer_module in self.attention:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[int] = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : Optional[int] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = hidden_state
for layer_module in self.layers:
A_ : int = layer_module(lowercase )
A_ : Union[str, Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Dict = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : str = in_channels != out_channels or stride != 1
A_ : int = max(1 , out_channels // config.groups_width )
A_ : Optional[int] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : List[str] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = hidden_state
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : str = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : Tuple = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[str] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Dict = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[Any] = config
A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : str = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase )
A_ : Optional[int] = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Dict = encoder_outputs[0]
A_ : List[Any] = self.pooler(lowercase )
# Change to NCHW output format have uniformity in the modules
A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : int = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : List[Any] = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
A_ : List[Any] = self.classifier[0](lowercase )
A_ : Union[str, Any] = self.classifier[1](lowercase )
A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
| 70 | def UpperCamelCase ( __lowercase : list ):
'''simple docstring'''
A_ : str = len(__lowercase )
for _ in range(__lowercase ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
A_ , A_ : Optional[Any] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_UpperCAmelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 1 |
from math import sqrt
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : int = 0
snake_case__ : int = 0
snake_case__ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(snake_case_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"{solution() = }")
| 297 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Dict , __A : str=1_3 , __A : str=7 , __A : Optional[int]=True , __A : int=True , __A : str=True , __A : Tuple=True , __A : Optional[int]=9_9 , __A : Optional[int]=3_2 , __A : Any=5 , __A : List[Any]=4 , __A : str=3_7 , __A : Union[str, Any]="gelu" , __A : str=0.1 , __A : Dict=0.1 , __A : Union[str, Any]=5_1_2 , __A : str=1_6 , __A : Optional[int]=2 , __A : List[Any]=0.0_2 , __A : Union[str, Any]=3 , __A : Optional[Any]=4 , __A : Optional[int]=None , ):
snake_case__ : int = parent
snake_case__ : str = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[int] = is_training
snake_case__ : Dict = use_input_mask
snake_case__ : Any = use_token_type_ids
snake_case__ : Optional[Any] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : int = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : str = num_labels
snake_case__ : List[Any] = num_choices
snake_case__ : Union[str, Any] = scope
def _lowercase ( self : int ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = None
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : str = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Optional[Any] ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def _lowercase ( self : Tuple , __A : Optional[Any] , __A : Optional[int] , __A : List[str] , __A : Optional[int] , __A : Union[str, Any] , __A : List[Any] , __A : Tuple ):
snake_case__ : List[str] = NystromformerModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : str = model(__A , attention_mask=__A , token_type_ids=__A )
snake_case__ : Optional[int] = model(__A , token_type_ids=__A )
snake_case__ : Any = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __A : Tuple , __A : Tuple , __A : Union[str, Any] , __A : List[Any] , __A : List[str] , __A : Union[str, Any] , __A : Tuple ):
snake_case__ : Dict = NystromformerForMaskedLM(config=__A )
model.to(__A )
model.eval()
snake_case__ : Optional[Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , __A : List[str] , __A : Tuple , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Union[str, Any] ):
snake_case__ : Any = NystromformerForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : str , __A : str , __A : Any , __A : str , __A : Optional[int] , __A : str , __A : Optional[Any] , __A : Union[str, Any] ):
snake_case__ : List[str] = self.num_labels
snake_case__ : Dict = NystromformerForSequenceClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Optional[Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[Any] , __A : Optional[Any] , __A : Any , __A : Optional[Any] , __A : int , __A : Union[str, Any] , __A : List[str] , __A : Any ):
snake_case__ : int = self.num_labels
snake_case__ : Tuple = NystromformerForTokenClassification(config=__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Union[str, Any] , __A : List[str] , __A : Union[str, Any] , __A : List[str] , __A : Dict , __A : List[Any] , __A : str , __A : Optional[int] ):
snake_case__ : str = self.num_choices
snake_case__ : Optional[int] = NystromformerForMultipleChoice(config=__A )
model.to(__A )
model.eval()
snake_case__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[int] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Tuple ):
snake_case__ : Tuple = self.prepare_config_and_inputs()
(
(
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
),
) : str = config_and_inputs
snake_case__ : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _lowercase ( self : Any ):
snake_case__ : int = NystromformerModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowercase ( self : str ):
self.config_tester.run_common_tests()
def _lowercase ( self : Any ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : str = type
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self : Any ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def _lowercase ( self : int ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Union[str, Any] = NystromformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : List[Any] ):
snake_case__ : str = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
snake_case__ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
snake_case__ : Any = model(__A )[0]
snake_case__ : List[Any] = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , __A )
snake_case__ : Union[str, Any] = torch.tensor(
[[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : Union[str, Any] = "the [MASK] of Belgium is Brussels"
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
snake_case__ : Tuple = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
snake_case__ : List[Any] = tokenizer(__A , return_tensors="pt" )
with torch.no_grad():
snake_case__ : List[str] = model(encoding.input_ids ).logits
snake_case__ : Optional[int] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__A ) , "capital" )
| 297 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Number of examples iterated over in the "full" and "small" benchmark runs.
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    """Read `length` examples from `dataset` one by one."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size` (`length` kept for a uniform signature)."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Read `length` examples one by one with the dataset formatted as `type`."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read `length` examples in slices of `batch_size` with the dataset formatted as `type`."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time each read function before and after
    shuffling, and dump all timings to RESULTS_FILE_PATH as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 702 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print every vertex index alongside its shortest distance from `src`."""
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation pass would still improve any edge,
    i.e. `graph` contains a cycle of negative total weight reachable in `distance`."""
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Compute shortest distances from `src` to every vertex with Bellman-Ford.

    `graph` is a list of edges, each a dict with keys 'src', 'dst', 'weight'.
    Raises Exception if a negative cycle is detected.
    """
    distance = [float('inf' )] * vertex_count
    distance[src] = 0.0

    # relax every edge |V| - 1 times
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception('Negative cycle found' )

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}

    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 133 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( ProcessorMixin ):
    """Combines a BridgeTower image processor and a RoBERTa tokenizer into a
    single processor that produces both text encodings and pixel values."""

    # attributes consumed by ProcessorMixin to wire up the two sub-processors
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__( self , image_processor , tokenizer ):
        '''Store the image processor and tokenizer via ProcessorMixin.'''
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        '''Tokenize `text` and preprocess `images`, returning one BatchEncoding
        containing input_ids/attention_mask plus pixel_values (+ pixel_mask).'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )

        return encoding

    def batch_decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''Forward to the tokenizer's decode.'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        '''Union of tokenizer and image-processor input names, order-preserving.'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 93 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Save/load round-trip and consistency tests for VisionTextDualEncoderProcessor."""

    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """Return a list with a single random PIL image (channels-first numpy moved to channels-last)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )

        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 509 | 0 |
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training data into an evaluation-set file (one question per
    line) and a gold-data file (tab-joined positive-context titles per line)."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        """--src_path""" , type=str , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
    parser.add_argument(
        """--evaluation_set""" , type=str , help="""where to store parsed evaluation_set file""" , )
    parser.add_argument(
        """--gold_data_path""" , type=str , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()

    with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
        args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            contexts = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(contexts ) + """\n""" )


if __name__ == "__main__":
    main()
| 693 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# The UnCLIP pipelines need both PyTorch and a recent transformers (>= 4.25.0).
# If either is missing, fall back to dummy objects that raise a helpful
# error message when instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder classes that explain the missing optional dependency on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations, only importable when the dependencies are present.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 693 | 1 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from top-left to bottom-right of the square
    matrix stored in `filename` (comma-separated values, one row per line),
    moving only right and down (Project Euler 81).

    `filename` is resolved relative to this module's directory unless absolute.
    """
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()

    grid = [[int(cell ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    n = len(grid[0] )

    # dp[i][j] = minimal path sum reaching cell (i, j)
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]

    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )

    return dp[-1][-1]


if __name__ == "__main__":
    print(f'{solution() = }')
| 10 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import structure: submodule name -> public names it exports.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the module is lazy.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''BPE tokenization tests for OpenAI GPT (slow and fast tokenizers).'''

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp( self ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )

    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"

    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]

                # Simple input tests: padding without a pad token must raise
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )

    def test_padding_different_model_input_name( self ):
        # tokenizer has no padding token, so the generic mixin test does not apply
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __UpperCamelCase ( __UpperCamelCase ):
    '''Runs the same OpenAI GPT tokenizer tests with ftfy and spacy installed.

    NOTE(review): this class reuses the previous test class's name, so it
    shadows its base in the module namespace; it should get a distinct name.
    '''

    pass
| 707 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __UpperCamelCase ( PretrainedConfig ):
    '''Configuration for SwitchTransformers models: stores model dimensions,
    sparse (mixture-of-experts) layer placement, and router hyper-parameters.'''

    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self , vocab_size=32_128 , d_model=768 , d_kv=64 , d_ff=2_048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 33 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_a : List[Any] = logging.get_logger(__name__)
def _a (model , flax_checkpoint_path ) -> "torch.nn.Module":
    """Deserialize the Flax checkpoint at `flax_checkpoint_path` and load its
    weights into `model` (a PyTorch model); returns the updated model.

    Raises OSError for git-lfs pointer files and EnvironmentError when the file
    cannot be interpreted as a Flax checkpoint at all.
    """
    try:
        with open(flax_checkpoint_path , 'rb' ) as flax_state_f:
            flax_state = from_bytes(model , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            # A file starting with "version" is a git-lfs pointer, not real weights.
            with open(flax_checkpoint_path ) as f:
                if f.read().startswith('version' ):
                    raise OSError(
                        'You seem to have cloned a repository without having git-lfs installed. Please'
                        ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
                        ' folder you cloned.' )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )

    return load_flax_weights_in_pytorch_model(model , flax_state )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    """Copy weights from a deserialized Flax state tree into `pt_model`,
    transposing kernels and renaming keys to PyTorch conventions; returns `pt_model`."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise

    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bfaa ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )

    pt_model.base_model_prefix = ''

    flax_state_dict = flatten_dict(flax_state , sep='.' )
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('.' )

        # Flax conv kernels are HWIO; PyTorch expects OIHW. Dense kernels need a transpose.
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight']

        if "time_embedding" not in flax_key_tuple_array:
            # Flax uses `layers_0`-style suffixes; PyTorch modules use `layers.0`.
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('_0' , '.0' )
                    .replace('_1' , '.1' )
                    .replace('_2' , '.2' )
                    .replace('_3' , '.3' )
                    .replace('_4' , '.4' )
                    .replace('_5' , '.5' )
                    .replace('_6' , '.6' )
                    .replace('_7' , '.7' )
                    .replace('_8' , '.8' )
                    .replace('_9' , '.9' )
                )

        flax_key = '.'.join(flax_key_tuple_array )

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )

    pt_model.load_state_dict(pt_model_dict )

    # re-transform missing_keys to list
    missing_keys = list(missing_keys )

    if len(unexpected_keys ) > 0:
        logger.warning(
            'Some weights of the Flax model were not used when initializing the PyTorch model'
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
            ' FlaxBertForSequenceClassification model).' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ' use it for predictions and inference.' )

    return pt_model
| 56 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# NOTE: the names below were restored from the references made later in this
# module (the ``_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_..._NAMES)``
# calls and the ``_model_mapping`` class attributes); previously every
# constant was assigned to the same throw-away name, leaving those
# references undefined at import time.

# Maps model type -> base Flax model class name.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)

# Lazily-resolved config-class -> model-class mappings; classes are only
# imported when first looked up.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    """Auto class instantiating the base Flax model matching a config/checkpoint."""

    # Read by _BaseAutoModelClass.from_config/from_pretrained to pick the class.
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    """Auto class for Flax models with a pre-training head."""

    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    """Auto class for Flax models with a causal language-modeling head."""

    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    """Auto class for Flax models with a masked language-modeling head."""

    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    """Auto class for Flax encoder-decoder (seq2seq) language models."""

    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    """Auto class for Flax models with a sequence-classification head."""

    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    """Auto class for Flax models with an extractive question-answering head."""

    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    """Auto class for Flax models with a token-classification head."""

    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    """Auto class for Flax models with a multiple-choice head."""

    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    """Auto class for Flax models with a next-sentence-prediction head."""

    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    """Auto class for Flax models with an image-classification head."""

    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    """Auto class for Flax vision-encoder/text-decoder models."""

    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    """Auto class for Flax speech-encoder/text-decoder models."""

    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
from ..utils import DummyObject, requires_backends
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): the original class name was lost to obfuscation (all six
    placeholder classes in this module share this name, so later definitions
    shadow earlier ones); restore the real names from upstream.  The
    metaclass reference was the undefined ``UpperCamelCase_`` — restored to
    the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): original class name lost to obfuscation; metaclass restored
    from the undefined ``UpperCamelCase_`` to the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): original class name lost to obfuscation; metaclass restored
    from the undefined ``UpperCamelCase_`` to the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): original class name lost to obfuscation; metaclass restored
    from the undefined ``UpperCamelCase_`` to the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): original class name lost to obfuscation; metaclass restored
    from the undefined ``UpperCamelCase_`` to the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class _lowerCamelCase(metaclass=DummyObject):
    """Import-time placeholder raising a helpful error when torch/transformers/onnx are absent.

    NOTE(review): original class name lost to obfuscation; metaclass restored
    from the undefined ``UpperCamelCase_`` to the imported ``DummyObject``.
    """

    # Read by DummyObject/requires_backends to report which backends are missing.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 708 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    # NOTE(review): argparse `type=bool` is a known pitfall — any non-empty
    # string (including "False") parses as True.  Left as-is to preserve the
    # CLI interface; consider `action="store_true"` upstream.
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    # `unknown` collects the example's own CLI arguments, forwarded verbatim below.
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        # Bring-your-own cluster: connect to the given host over SSH.
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        # On-demand cluster provisioned via the selected cloud provider.
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 462 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Pipeline returning the raw hidden states of the model for a given text.

    NOTE(review): obfuscation had replaced the base class with the undefined
    name ``__UpperCamelCase`` (restored to the imported ``Pipeline``) and
    collapsed all four pipeline hooks onto one method name, so only the last
    definition survived; the standard ``Pipeline`` hook names are restored.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split pipeline kwargs into (preprocess, forward, postprocess) params."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the input text into framework tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first model output, as a tensor or as nested lists."""
        if return_tensors:
            return model_outputs[0]
        # [0] is the first available tensor, logits or last_hidden_state.
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for the given input text(s)."""
        return super().__call__(*args, **kwargs)
| 381 |
def base16_encode(data: bytes) -> str:
    """Encode *data* as an uppercase base16 (hex) string per RFC 3548.

    >>> base16_encode(b"Hello")
    '48656C6C6F'
    >>> base16_encode(b"")
    ''
    """
    # Bug fix: the original called hex() on the whole bytes object instead of
    # each byte, which raises TypeError for any non-empty input.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes.

    Raises ValueError when the input has an odd number of digits or contains
    characters outside the uppercase RFC 3548 base16 alphabet.

    >>> base16_decode("48656C6C6F")
    b'Hello'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A_ ,unittest.TestCase ):
    # Test suite for the RoBERTa slow/fast tokenizers (byte-level BPE).
    #
    # NOTE(review): this class is heavily damaged by obfuscation and will not
    # run as-is:
    #   * the base class ``A_`` is undefined — presumably it should be the
    #     ``TokenizerTesterMixin`` imported above; confirm against upstream.
    #   * every test method below is named ``_snake_case``, so each definition
    #     shadows the previous one and only the last survives on the class.
    #   * several method bodies reference names (``snake_case``,
    #     ``text_of_1_token``, ``text``) that are not defined in their scope.
    # Code is left byte-identical; only comments/docstrings were added.
    _UpperCamelCase : Any = RobertaTokenizer
    _UpperCamelCase : Optional[int] = RobertaTokenizerFast
    _UpperCamelCase : Union[str, Any] = True
    _UpperCamelCase : List[str] = {'''cls_token''': '''<s>'''}
    def _snake_case ( self ) -> int:
        """Write a tiny BPE vocab/merges fixture into a temp dir for the tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        a__ : Dict = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        # NOTE(review): ``snake_case`` is undefined in this method — likely the
        # vocab/merges lists above were meant here.
        a__ : Any = dict(zip(snake_case , range(len(snake_case ) ) ) )
        a__ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        a__ : int = {"""unk_token""": """<unk>"""}
        a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        a__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(snake_case ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(snake_case ) )
    def _snake_case ( self , **snake_case ) -> Any:
        """Build a slow RobertaTokenizer from the on-disk fixture."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
    def _snake_case ( self , **snake_case ) -> Optional[int]:
        """Build a fast RobertaTokenizerFast from the on-disk fixture."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
    def _snake_case ( self , snake_case ) -> Dict:
        """Return (input_text, expected_output_text) for round-trip checks."""
        a__ : Union[str, Any] = """lower newer"""
        a__ : Optional[int] = """lower newer"""
        return input_text, output_text
    def _snake_case ( self ) -> Optional[Any]:
        """Tokenization and token->id conversion against the tiny fixture vocab."""
        a__ : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        a__ : int = """lower newer"""
        a__ : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        a__ : List[Any] = tokenizer.tokenize(snake_case )  # , add_prefix_space=True)
        self.assertListEqual(snake_case , snake_case )
        a__ : Dict = tokens + [tokenizer.unk_token]
        a__ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
    def _snake_case ( self ) -> Any:
        """Known roberta-base input ids for two reference sentences."""
        a__ : int = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=snake_case ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=snake_case ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    @slow
    def _snake_case ( self ) -> Union[str, Any]:
        """Special-token insertion for single sequences and pairs (roberta-base)."""
        a__ : Optional[int] = self.tokenizer_class.from_pretrained("roberta-base" )
        a__ : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=snake_case )
        a__ : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case )
        a__ : int = tokenizer.encode(
            "sequence builders" , add_special_tokens=snake_case , add_prefix_space=snake_case )
        a__ : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=snake_case , add_prefix_space=snake_case )
        a__ : int = tokenizer.build_inputs_with_special_tokens(snake_case )
        a__ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def _snake_case ( self ) -> Optional[int]:
        """Prefix-space handling and spacing after added special tokens."""
        a__ : Any = self.get_tokenizer()
        a__ : Optional[int] = """Encode this sequence."""
        a__ : int = tokenizer.byte_encoder[""" """.encode("utf-8" )[0]]
        # Testing encoder arguments
        a__ : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case )
        a__ : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(snake_case , snake_case )
        a__ : str = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case )
        a__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(snake_case , snake_case )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        a__ : Any = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        a__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(snake_case , snake_case )
        # Testing spaces after special tokens
        a__ : Union[str, Any] = """<mask>"""
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case )} )  # mask token has a left space
        a__ : List[str] = tokenizer.convert_tokens_to_ids(snake_case )
        a__ : Tuple = """Encode <mask> sequence"""
        a__ : str = """Encode <mask>sequence"""
        a__ : int = tokenizer.encode(snake_case )
        a__ : Union[str, Any] = encoded.index(snake_case )
        a__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(snake_case , snake_case )
        a__ : List[Any] = tokenizer.encode(snake_case )
        a__ : Optional[Any] = encoded.index(snake_case )
        a__ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(snake_case , snake_case )
    def _snake_case ( self ) -> Dict:
        """Intentionally a no-op (disables an inherited mixin test)."""
        pass
    def _snake_case ( self ) -> Tuple:
        """Slow and fast tokenizers agree on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
                a__ : int = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
                a__ : Optional[int] = """A, <mask> AllenNLP sentence."""
                a__ : Optional[int] = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
                a__ : Any = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                a__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                a__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    snake_case , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def _snake_case ( self ) -> Optional[int]:
        """add_prefix_space/trim_offsets survive fast-tokenizer serialization."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            a__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
            a__ : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            a__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , snake_case )
            self.assertEqual(post_processor_state["add_prefix_space"] , snake_case )
            self.assertEqual(post_processor_state["trim_offsets"] , snake_case )
    def _snake_case ( self ) -> Tuple:
        """Offset mappings for every add_prefix_space/trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a__ : Union[str, Any] = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                # NOTE(review): ``text_of_1_token`` below is undefined — it was
                # presumably the name of the variable assigned just above.
                a__ : List[Any] = F"""{text_of_1_token} {text_of_1_token}"""
                a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Optional[int] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , )
                a__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(snake_case ) + 1, len(snake_case ) + 1 + len(snake_case )) , )
                a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , )
                a__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(snake_case ), len(snake_case ) + 1 + len(snake_case )) , )
                # NOTE(review): ``text`` is undefined here — upstream this line
                # prepends a space to the test string built above.
                a__ : Tuple = F""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Optional[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(snake_case ) + 1, 1 + len(snake_case ) + 1 + len(snake_case )) , )
                a__ : Dict = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : Union[str, Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , )
                a__ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    snake_case , use_fast=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case )
                a__ : List[Any] = tokenizer_r(snake_case , return_offsets_mapping=snake_case , add_special_tokens=snake_case )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(snake_case )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(snake_case ), 1 + len(snake_case ) + 1 + len(snake_case )) , )
| 708 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    """Drop checkpoint bookkeeping entries from ``state_dict`` in place."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # default=None so an absent key is silently ignored
        state_dict.pop(k, None)
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    """Rename all keys of ``s_dict`` in place via `WHISPER_MAPPING` substring swaps.

    Returns the same dict for convenience; logs every rename.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        # apply every matching substring replacement, accumulating into new_key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight with ``emb``.

    Used to tie the LM head (`proj_out`) to the decoder token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share the embedding weight tensor instead of copying it
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url, root):
    """Download ``url`` into directory ``root`` and return the raw bytes.

    The expected SHA256 digest is the second-to-last URL path component; a
    cached file with a matching digest is reused instead of re-downloading.
    Raises RuntimeError if the target path is not a regular file or the
    downloaded bytes fail the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into WhisperForConditionalGeneration.

    ``checkpoint_path`` is either a local ``.pt`` file or a size key from
    `_MODELS`; the converted model is saved to ``pytorch_dump_folder_path``.
    """
    import io

    if ".pt" not in checkpoint_path:
        # download by model-size key, caching next to the output folder
        root = os.path.dirname(pytorch_dump_folder_path) or "."
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path], root)), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # keep the decoder embedding weight to tie/copy into the LM head below
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): reads "n_text_state" as in the original; "n_text_head" looks intended — confirm
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (must not be clobbered by the archive-map assignment below).
logger = logging.get_logger(__name__)

# Map of published RoFormer checkpoints to their hosted config files.
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCamelCase( __snake_case ):
    """Configuration for a RoFormer model (rotary-position-embedding transformer).

    Defaults restored from the RoFormer configuration reference; the original
    obfuscated signature reused one parameter name, which is a SyntaxError.
    """

    # model identifier used by the auto-config machinery
    __magic_name__ = 'roformer'

    def __init__(
        self,
        vocab_size=5_0000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        # fall back to hidden_size when no separate embedding size is given
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowerCamelCase( __snake_case ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def lowerCAmelCase__ ( self ):
        # multiple-choice inputs carry an extra `choice` axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 27 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Return NAND of two bits: 0 only when both inputs are 1, otherwise 1."""
    # NAND is true whenever at least one input is 0
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    """Exhaustively verify the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
    # Print the full NAND truth table for all four input combinations.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 27 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Substring renames from original SAM checkpoint keys to HF module names.
# Referenced as `KEYS_TO_MODIFY_MAPPING` in `replace_keys`.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
def replace_keys(state_dict):
    """Translate original SAM checkpoint keys into the HF naming scheme.

    Drops the `pixel_mean`/`pixel_std` buffers, applies the substring
    renames from `KEYS_TO_MODIFY_MAPPING`, renumbers the hypernetwork MLP
    layers (0/1/2 -> proj_in/layers.0/proj_out), and duplicates the shared
    positional embedding under its top-level HF key. Returns a new dict.
    """
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    # the prompt encoder's positional embedding is shared at the top level too
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download an original SAM checkpoint, load it into a HF `SamModel`,
    and sanity-check IoU scores against reference values (CUDA required).

    NOTE(review): `pytorch_dump_folder` and `push_to_hub` are accepted but not
    used in the visible body — confirm whether saving/pushing was intended.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    # reference IoU values recorded for the ViT-H checkpoint
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604
    input_boxes = ((75, 275, 1725, 850),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514
    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # supported original-SAM checkpoint names
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        # NOTE(review): the default is not among `choices`, so argparse rejects any
        # explicit --model_hub_id value equal to the default — confirm intent
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module logger and the lazily-built str -> jax Device map used by the formatter.
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    """Formatter that converts Arrow-backed rows/columns/batches into jax.numpy arrays.

    Arrays are materialised on the device named by ``device`` (a string id);
    extra keyword arguments are forwarded to ``jnp.array``.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default "
                f"device: {str(jax.devices()[0] )}." )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        """Map device string identifiers to the actual jax devices."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert a scalar/ndarray/PIL image into a jnp array on the target device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 25 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, seeded by x0 and x1.

    Iterates until two successive estimates differ by less than 1e-5.
    Raises ZeroDivisionError when the secant slope collapses (equal points
    or equal function values).
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant step: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """Example polynomial f(x) = x^3 - 2x - 5, with a real root near 2.0946."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
    # Locate the real root of f between the two seeds 3 and 3.5.
    print(intersection(f, 3, 3.5))
| 426 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    """Bundles a CLIP image processor and a CLIP tokenizer into one processor."""

    # attribute names the processor mixin manages, and their default classes
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor' )
        # accept the legacy `feature_extractor` kwarg as the image processor
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__( self, text=None, images=None, return_tensors=None, **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # merge image features into the token encoding
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self, *args, **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self, *args, **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # union of tokenizer and image-processor input names, de-duplicated in order
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning, )
        return self.image_processor
| 286 | 0 |
from __future__ import annotations

import time

# A path is a list of (y, x) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A BFS search-tree node: grid position, goal position, and parent link."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # positions are stored as (y, x)
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level `grid` using moves from `delta`."""

    def __init__(self, start, goal):
        # start/goal are (y, x); Node takes (x, y) first
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Return the path from start to target, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Expand ``parent`` into in-bounds, obstacle-free neighbour nodes."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node):
        """Walk parent links back from ``node`` and return the start-to-node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Runs a forward and a backward BFS in lockstep until their frontiers meet."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Return the joined path, or [start] when the goal is unreachable."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # each direction retargets the other's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        # drop the duplicated meeting node before reversing
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 711 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Raise ValueError when any config class source lacks a valid checkpoint link."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    # Fails with a ValueError listing offending config classes, if any.
    check_config_docstrings_have_checkpoints()
| 232 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example spliced into the pipeline's `__call__` docstring through the
# `replace_example_docstring` decorator below.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def UpperCAmelCase_(height, width, scale_factor=8):
    """Round a requested image size up to the nearest latent-compatible size.

    The movq latents are `scale_factor**2` times smaller than the image on
    each side; this returns the smallest size (already re-multiplied by
    `scale_factor`) whose latents cover the requested image.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: movq spatial downscale factor (default 8).

    Returns:
        Tuple `(new_height, new_width)`: each dimension ceil-divided by
        `scale_factor**2`, then multiplied by `scale_factor`.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when height is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class A_ ( __lowerCamelCase ):
    """Kandinsky 2.2 controlnet decoder pipeline (unet + scheduler + movq).

    Turns prior image embeddings plus a controlnet hint into images through a
    classifier-free-guided denoising loop, then decodes latents with movq.

    NOTE(review): local names in this class appear machine-mangled — every
    assignment targets `lowercase` while later lines read the intended names
    (`latents`, `batch_size`, `device`, ...), and several `def` headers repeat
    the parameter name `snake_case`, which is a SyntaxError. Documented
    as-is; the code will not run until those names are restored.
    """

    def __init__( self , snake_case , snake_case , snake_case , ):
        # Register the three sub-models on the pipeline (unet, scheduler, movq).
        super().__init__()
        self.register_modules(
            unet=snake_case , scheduler=snake_case , movq=snake_case , )
        # Spatial downscale factor of the movq VAE; presumably meant to be
        # stored as `self.movq_scale_factor` (read in `__call__`) — TODO confirm.
        lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        # Create (or validate) the initial latent tensor and scale it by the
        # scheduler's initial noise sigma.
        if latents is None:
            lowercase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            lowercase = latents.to(snake_case )
        lowercase = latents * scheduler.init_noise_sigma
        return latents

    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        # Offload unet and movq to CPU with accelerate's `cpu_offload`;
        # weights are streamed back to `cuda:{gpu_id}` when used.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        lowercase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(snake_case , snake_case )

    def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
        # Model-level CPU offload via hooks (requires accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        lowercase = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=snake_case )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowercase , lowercase = cpu_offload_with_hook(snake_case , snake_case , prev_module_hook=snake_case )
        # We'll offload the last model manually.
        lowercase = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Device on which the unet's hooks actually execute (supports
        # accelerate offloading); falls back to the pipeline's device.
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(snake_case , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(snake_case )
    def __call__( self , snake_case , snake_case , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 100 , snake_case = 4.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , ):
        # Full generation loop: prepare embeddings/hints, iterate the
        # scheduler over the unet, then decode latents with movq.
        # NOTE(review): the decorator argument `snake_case` is undefined at
        # class-creation time; presumably meant to be EXAMPLE_DOC_STRING.
        lowercase = self._execution_device
        # Classifier-free guidance is active only for guidance_scale > 1.
        lowercase = guidance_scale > 1.0
        if isinstance(snake_case , snake_case ):
            lowercase = torch.cat(snake_case , dim=0 )
        if isinstance(snake_case , snake_case ):
            lowercase = torch.cat(snake_case , dim=0 )
        if isinstance(snake_case , snake_case ):
            lowercase = torch.cat(snake_case , dim=0 )
        lowercase = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate embeds/hints so one forward pass covers the
            # unconditional and conditional branches.
            lowercase = image_embeds.repeat_interleave(snake_case , dim=0 )
            lowercase = negative_image_embeds.repeat_interleave(snake_case , dim=0 )
            lowercase = hint.repeat_interleave(snake_case , dim=0 )
            lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case )
            lowercase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case )
        self.scheduler.set_timesteps(snake_case , device=snake_case )
        lowercase = self.scheduler.timesteps
        lowercase = self.movq.config.latent_channels
        lowercase , lowercase = downscale_height_and_width(snake_case , snake_case , self.movq_scale_factor )
        # create initial latent
        lowercase = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case , snake_case , snake_case , self.scheduler , )
        for i, t in enumerate(self.progress_bar(snake_case ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase = {'image_embeds': image_embeds, 'hint': hint}
            lowercase = self.unet(
                sample=snake_case , timestep=snake_case , encoder_hidden_states=snake_case , added_cond_kwargs=snake_case , return_dict=snake_case , )[0]
            if do_classifier_free_guidance:
                # Split off the learned variance, recombine the guided noise
                # prediction with the conditional branch's variance.
                lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
                lowercase , lowercase = noise_pred.chunk(2 )
                lowercase , lowercase = variance_pred.chunk(2 )
                lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase , lowercase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase = self.scheduler.step(
                snake_case , snake_case , snake_case , generator=snake_case , )[0]
        # post-processing
        lowercase = self.movq.decode(snake_case , force_not_quantize=snake_case )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move to NHWC numpy.
            lowercase = image * 0.5 + 0.5
            lowercase = image.clamp(0 , 1 )
            lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase = self.numpy_to_pil(snake_case )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case )
| 84 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the MobileNetV2 model family: maps submodule
# name -> public names exported from it. Entries guarded by optional
# dependencies (vision, torch) are only added when those are available.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports, mirroring _import_structure.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Emit full debug logging from the tests and mirror the root logger to
# stdout so subprocess output is captured by the test runner.
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowerCamelCase (lowerCamelCase ):
    """End-to-end finetuning tests for `finetune_rag.py` (RAG seq2seq).

    Builds a tiny on-disk dataset, launches the finetuning script in a
    subprocess with a minimal configuration, and checks the exact-match
    metric written to `metrics.json`.

    NOTE(review): local names appear machine-mangled — assignments target
    `__snake_case` while later lines read the intended names (`contents`,
    `tmp_dir`, `testargs`, `result`, ...), all methods share the name
    `__lowerCamelCase` (later defs shadow earlier ones), and one `def`
    header repeats the parameter name `SCREAMING_SNAKE_CASE_`, which is a
    SyntaxError. Documented as-is; it will not run until names are restored.
    """

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
        # Write a tiny source/target dataset (train/val/test splits) into
        # the given directory, one line per example.
        os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
        __snake_case = {'source': 'What is love ?', 'target': 'life'}
        __snake_case = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                # One identical example repeated n_lines[split] times.
                __snake_case = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(SCREAMING_SNAKE_CASE_ , f'''{split}.{field}''' ) , 'w' ) as f:
                    f.write(SCREAMING_SNAKE_CASE_ )

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "pytorch" ):
        # Run finetune_rag.py in a subprocess against the dummy dataset and
        # return the parsed metrics.json as a dict.
        __snake_case = self.get_auto_remove_tmp_dir()
        __snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , 'output' )
        __snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , 'data' )
        self._create_dummy_data(data_dir=SCREAMING_SNAKE_CASE_ )
        # CLI arguments for the finetuning script; split() turns the block
        # into an argv list.
        __snake_case = f'''
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            '''.split()
        if gpus > 0:
            testargs.append(f'''--gpus={gpus}''' )
            if is_apex_available():
                testargs.append('--fp16' )
        else:
            # CPU fallback: 2 ddp_cpu processes.
            testargs.append('--gpus=0' )
            testargs.append('--distributed_backend=ddp_cpu' )
            testargs.append('--num_processes=2' )
        __snake_case = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() )
        __snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , 'metrics.json' )
        with open(SCREAMING_SNAKE_CASE_ ) as f:
            __snake_case = json.load(SCREAMING_SNAKE_CASE_ )
        return result

    @require_torch_gpu
    def __lowerCamelCase ( self ):
        # Single-GPU run with the default (pytorch) retriever.
        __snake_case = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )

    @require_torch_multi_gpu
    def __lowerCamelCase ( self ):
        # Multi-GPU run with the default (pytorch) retriever.
        __snake_case = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )

    @require_torch_gpu
    @require_ray
    def __lowerCamelCase ( self ):
        # Single-GPU run with the Ray distributed retriever.
        __snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def __lowerCamelCase ( self ):
        # NOTE(review): despite the multi-GPU marker this launches gpus=1,
        # same as the test above — possibly intended to be gpus=2. TODO confirm.
        __snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 345 |
from __future__ import annotations
import time
import numpy as np
# Worked example for the Banker's algorithm below: the total claim vector,
# the per-process currently-allocated-resources table, and the per-process
# maximum-claim table. The original bound all three to the same name, losing
# the first two tables.
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class _lowerCamelCase :
    """Banker's algorithm deadlock-avoidance simulation.

    Given the total claim vector, the currently-allocated-resources table
    and the maximum-claim table, `main()` repeatedly picks a process whose
    outstanding need fits in the available resources, "executes" it,
    reclaims its allocation, and reports whether the system stays in a safe
    state. (The original's methods were all defined under the single mangled
    name `__lowerCamelCase` and their locals were bound to `__snake_case`,
    so nothing could run; meaningful names are restored here.)
    """

    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        # Column-wise sum of the allocation table: total currently
        # allocated per resource type.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources(self):
        # Free resources = total claim - currently allocated (numpy array).
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need(self):
        # Outstanding need per process = maximum claim - current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager(self):
        # Map each need row back to its original process index, so a process
        # can still be identified after rows are removed from the work list.
        return {self.__need().index(need ): need for need in self.__need()}

    def main(self, **kwargs):
        """Run the safety simulation, printing each execution step.

        Any keyword argument with a true value (e.g. ``describe=True``)
        triggers a pretty-print of the input tables first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        # This process cannot run with current resources.
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('The process is in a safe state.\n' )
            else:
                print('System in unsafe state. Aborting...\n' )
                break

    def __pretty_data(self):
        # Print the allocation/claim tables and the resource summaries.
        print(' ' * 9 + 'Allocated Resource Table' )
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item ) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item )
                + '\n' )
        print(' ' * 9 + 'System Resource Table' )
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item ) + 1}'''
                + ' '.join(f'''{it:>8}''' for it in item )
                + '\n' )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )


# Public, readable alias (backward-compatible addition).
BankersAlgorithm = _lowerCamelCase
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 345 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase:
    """A binary-tree node holding a coin count and optional children.

    NOTE(review): the original's three fields were all mangled to the single
    name `a_`; the solver below reads `node.data`, `node.left` and
    `node.right`, so those field names are restored here.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


# Readable alias; the type annotations in this module refer to `TreeNode`.
TreeNode = _lowerCAmelCase
# (moves, excess) result of distributing coins within a subtree.
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def lowerCamelCase_(root: TreeNode | None) -> int:
    """Return the minimum number of moves to leave exactly one coin per node.

    Each move transfers a single coin between a parent and one of its
    children. The total number of coins in the tree must equal the number
    of nodes.

    Raises:
        ValueError: if the coin count does not match the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins' )

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        # `excess` is offset so that an empty subtree reports 1; a child's
        # deficit/surplus is then (1 - child_excess) coins flowing to it.
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        # Every coin crossing an edge costs one move, in either direction.
        coins_distrib = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 411 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=2 , UpperCamelCase : str=True , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=10 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Tuple=32 * 4 , UpperCamelCase : Tuple=32 * 6 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : List[Any]=32 , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : List[str] = is_training
_snake_case : Optional[int] = use_auxiliary_loss
_snake_case : Optional[Any] = num_queries
_snake_case : Any = num_channels
_snake_case : Union[str, Any] = min_size
_snake_case : Dict = max_size
_snake_case : str = num_labels
_snake_case : List[Any] = mask_feature_size
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase )
_snake_case : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase )
_snake_case : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase ) > 0.5
).float()
_snake_case : Any = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase ) > 0.5).long()
_snake_case : List[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Tuple = self.prepare_config_and_inputs()
_snake_case : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : int = output.encoder_hidden_states
_snake_case : Tuple = output.pixel_decoder_hidden_states
_snake_case : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : str=False ):
'''simple docstring'''
with torch.no_grad():
_snake_case : str = MaskFormerModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : str = model(UpperCamelCase , output_hidden_states=UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : str = MaskFormerForInstanceSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
def comm_check_on_output(UpperCamelCase : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : Optional[Any] = model(UpperCamelCase )
comm_check_on_output(UpperCamelCase )
_snake_case : Union[str, Any] = model(
pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
comm_check_on_output(UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =(MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a_ : Tuple =(
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a_ : Any =False
a_ : List[str] =False
a_ : List[str] =False
a_ : Optional[Any] =False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = MaskFormerModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(UpperCamelCase )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[Any] = [*signature.parameters.keys()]
_snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case : int = MaskFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = (self.model_tester.min_size,) * 2
_snake_case : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCamelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=UpperCamelCase ),
'class_labels': torch.zeros(2 , 10 , device=UpperCamelCase ).long(),
}
_snake_case : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase )
_snake_case : Any = model(**UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(UpperCamelCase ).to(UpperCamelCase )
_snake_case : Dict = model(**UpperCamelCase , output_attentions=UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case : Dict = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
_snake_case : int = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Optional[Any] = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase ).loss
loss.backward()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs()
_snake_case : List[str] = True
_snake_case : List[Any] = True
_snake_case : List[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Dict = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
_snake_case : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_snake_case : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase_ = 1E-4
def lowerCamelCase_ ( )-> List[Any]:
_snake_case : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    '''Slow integration tests that run real MaskFormer checkpoints on a COCO fixture image
    and compare hidden states / logits against hard-coded reference values.

    NOTE(review): the method bodies reference a free name ``UpperCamelCase`` that is never
    defined in this module, and all four test methods share the name ``UpperCamelCase_`` so
    later definitions shadow earlier ones — an automated rename appears to have clobbered
    several distinct identifiers; confirm against the original test file.
    '''
    @cached_property
    def UpperCamelCase_ ( self : Any ):
        '''Return the small-COCO MaskFormer image processor, or None when vision deps are absent.'''
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
            if is_vision_available()
            else None
        )
    def UpperCamelCase_ ( self : Optional[Any] ):
        '''Check encoder / pixel-decoder / transformer-decoder hidden states of the base model.'''
        _snake_case : Optional[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(UpperCamelCase )
        _snake_case : Dict = self.default_image_processor
        _snake_case : Tuple = prepare_img()
        _snake_case : str = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
        _snake_case : int = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            _snake_case : Union[str, Any] = model(**UpperCamelCase )
        # Expected values recorded from the reference checkpoint.
        _snake_case : Tuple = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(UpperCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
        _snake_case : Optional[int] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(UpperCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
        _snake_case : Optional[int] = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(UpperCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def UpperCamelCase_ ( self : Optional[Any] ):
        '''Check mask and class query logits of the swin-small instance-segmentation head.'''
        _snake_case : Optional[int] = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(UpperCamelCase )
            .eval()
        )
        _snake_case : Any = self.default_image_processor
        _snake_case : List[Any] = prepare_img()
        _snake_case : List[str] = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
        _snake_case : List[Any] = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            _snake_case : Optional[Any] = model(**UpperCamelCase )
        # masks_queries_logits
        _snake_case : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        # Expected values recorded from the reference checkpoint.
        _snake_case : Any = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        _snake_case : str = torch.tensor(UpperCamelCase ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
        # class_queries_logits
        _snake_case : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case : Tuple = torch.tensor(
            [
                [1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
                [3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
                [1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
            ] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def UpperCamelCase_ ( self : List[Any] ):
        '''Check mask and class query logits of the resnet101 COCO-stuff checkpoint.'''
        _snake_case : Tuple = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
            .to(UpperCamelCase )
            .eval()
        )
        _snake_case : int = self.default_image_processor
        _snake_case : Optional[int] = prepare_img()
        _snake_case : int = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
        _snake_case : Optional[Any] = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            _snake_case : List[Any] = model(**UpperCamelCase )
        # masks_queries_logits
        _snake_case : Union[str, Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _snake_case : List[Any] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        _snake_case : Union[str, Any] = torch.tensor(UpperCamelCase ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
        # class_queries_logits
        _snake_case : Union[str, Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case : Union[str, Any] = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def UpperCamelCase_ ( self : Tuple ):
        '''Run a forward pass with segmentation maps and check that a loss is produced.'''
        _snake_case : Any = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(UpperCamelCase )
            .eval()
        )
        _snake_case : Optional[int] = self.default_image_processor
        # Two zero images plus zero segmentation maps exercise the training-style code path.
        _snake_case : Dict = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
        _snake_case : List[str] = inputs['pixel_values'].to(UpperCamelCase )
        _snake_case : Tuple = [el.to(UpperCamelCase ) for el in inputs['mask_labels']]
        _snake_case : Optional[int] = [el.to(UpperCamelCase ) for el in inputs['class_labels']]
        with torch.no_grad():
            _snake_case : List[str] = model(**UpperCamelCase )
        self.assertTrue(outputs.loss is not None )
| 411 | 1 |
'''Lazy import structure for the RAG (Retrieval-Augmented Generation) model package.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports. Consumed by _LazyModule below so
# heavy framework modules are only imported on first attribute access.
# Fix: the original bound every piece of this structure to one clobbered name
# (`__snake_case`, each assignment overwriting the last) and then referenced an
# undefined `_import_structure` in the _LazyModule call, raising NameError on import.
_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}

# Register the PyTorch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_rag'] = [
        'RagModel',
        'RagPreTrainedModel',
        'RagSequenceForGeneration',
        'RagTokenForGeneration',
    ]

# Register the TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_rag'] = [
        'TFRagModel',
        'TFRagPreTrainedModel',
        'TFRagSequenceForGeneration',
        'TFRagTokenForGeneration',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 |
'''Lazy import structure for the ConvBERT model package.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports. Consumed by _LazyModule below.
# Fix: the original bound every piece of this structure to one clobbered name
# (`__snake_case`, each assignment overwriting the last) and then referenced an
# undefined `_import_structure` in the _LazyModule call, raising NameError on import.
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']

# Register the PyTorch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convbert'] = [
        'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvBertForMaskedLM',
        'ConvBertForMultipleChoice',
        'ConvBertForQuestionAnswering',
        'ConvBertForSequenceClassification',
        'ConvBertForTokenClassification',
        'ConvBertLayer',
        'ConvBertModel',
        'ConvBertPreTrainedModel',
        'load_tf_weights_in_convbert',
    ]

# Register the TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convbert'] = [
        'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFConvBertForMaskedLM',
        'TFConvBertForMultipleChoice',
        'TFConvBertForQuestionAnswering',
        'TFConvBertForSequenceClassification',
        'TFConvBertForTokenClassification',
        'TFConvBertLayer',
        'TFConvBertModel',
        'TFConvBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make RNG / cuDNN behavior deterministic so generated-image values are reproducible.
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
    """Fast CPU tests for the PNDM unconditional image-generation pipeline.

    NOTE(review): an automated rename appears to have clobbered distinct locals into
    ``__SCREAMING_SNAKE_CASE`` — the bodies reference undefined names (``model``,
    ``pndm``, ``image``, ``image_from_tuple`` …) and the property below is referenced
    elsewhere as ``dummy_uncond_unet``; confirm against the original test file.
    """
    @property
    def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
        """Build a tiny seeded UNet2D model for fast pipeline tests."""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def UpperCAmelCase__ ( self : Dict ) -> Any:
        """Check the pipeline output and its return_dict=False tuple form agree with reference values."""
        __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
        __SCREAMING_SNAKE_CASE = PNDMScheduler()
        __SCREAMING_SNAKE_CASE = PNDMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pndm.to(__SCREAMING_SNAKE_CASE )
        pndm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        # Same seed twice so both invocations produce the same image.
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pndm(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , output_type="""numpy""" ).images
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pndm(generator=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , output_type="""numpy""" , return_dict=__SCREAMING_SNAKE_CASE )[0]
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # Expected corner-pixel values recorded from the reference run.
        __SCREAMING_SNAKE_CASE = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: PNDM pipeline with the real google/ddpm-cifar10-32 UNet.

    NOTE(review): as in the fast test class above, locals were clobbered into
    ``__SCREAMING_SNAKE_CASE`` and the body references undefined names
    (``pndm``, ``image``, ``image_slice`` …); confirm against the original file.
    """
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        """Generate one image from the pretrained checkpoint and compare against recorded values."""
        __SCREAMING_SNAKE_CASE = """google/ddpm-cifar10-32"""
        __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = PNDMScheduler()
        __SCREAMING_SNAKE_CASE = PNDMPipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pndm.to(__SCREAMING_SNAKE_CASE )
        pndm.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pndm(generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" ).images
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # Expected corner-pixel values recorded from the reference run.
        __SCREAMING_SNAKE_CASE = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 627 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# fairseq parameter prefix -> HF Wav2Vec2 module path; '*' is replaced by the layer index.
# NOTE(review): the functions below read these tables as MAPPING / TOP_LEVEL_KEYS, but
# both are bound to the clobbered name `UpperCAmelCase` here (an automated rename),
# so those lookups would raise NameError — confirm against the original script.
UpperCAmelCase : Optional[Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}
# HF attributes that live at the top level of the model (no 'wav2vec2.' prefix).
UpperCAmelCase : Any = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def a__ ( a__ ):
    """Read an id-to-label text file into a dict.

    Each non-empty line contributes ``{0-based line index: first whitespace-separated
    token}``; blank lines are skipped but still consume a line index.

    Fixes: the original iterated ``enumerate(a__)`` — the *filename string*, not the
    opened file handle — and bound every intermediate to one clobbered name, so
    ``words``/``value``/``result`` were undefined (NameError).
    """
    result = {}
    with open(a__ , "r" ) as label_file:
        for line_number, line in enumerate(label_file ):
            line = line.strip()
            if line:
                # First token on the line is the label; the key is the line index.
                result[line_number] = line.split()[0]
    return result
def a__ ( a__ , a__ , a__ , a__ , a__ ):
    """Copy one fairseq tensor into the matching attribute of the HF model (set_recursively-style).

    NOTE(review): this definition is broken by an automated rename — five parameters
    share the name ``a__`` (a SyntaxError in Python), the body reads undefined names
    (``key``, ``full_name``, ``weight_type``, ``hf_pointer``, ``shape_pointer``,
    ``hf_shape``, ``value``, ``PARAM_MAPPING``), and every local is clobbered into
    ``__SCREAMING_SNAKE_CASE``. Intended argument order must be confirmed against the
    original conversion script before any behavioral change.
    """
    for attribute in key.split(""".""" ):
        __SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
    __SCREAMING_SNAKE_CASE = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(a__ ):
            __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            __SCREAMING_SNAKE_CASE = """param"""
    if weight_type is not None and weight_type != "param":
        __SCREAMING_SNAKE_CASE = getattr(a__ , a__ ).shape
    elif weight_type is not None and weight_type == "param":
        __SCREAMING_SNAKE_CASE = hf_pointer
        for attribute in hf_param_name.split(""".""" ):
            __SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
        __SCREAMING_SNAKE_CASE = shape_pointer.shape
        # let's reduce dimension
        __SCREAMING_SNAKE_CASE = value[0]
    else:
        __SCREAMING_SNAKE_CASE = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "weight_g":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "weight_v":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "bias":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "param":
        for attribute in hf_param_name.split(""".""" ):
            __SCREAMING_SNAKE_CASE = getattr(a__ , a__ )
        __SCREAMING_SNAKE_CASE = value
    else:
        __SCREAMING_SNAKE_CASE = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a__ ( a__ , a__ , a__ , a__ , a__ ):
    """Store one fairseq tensor into a flat HF state dict under its renamed key (rename_dict-style).

    NOTE(review): broken by an automated rename — five parameters share the name
    ``a__`` (a SyntaxError), and the body reads undefined names (``full_name``,
    ``weight_type``, ``key``, ``hf_param_name``, ``full_key``, ``value``,
    ``PARAM_MAPPING``); confirm intended names/order against the original script.
    """
    __SCREAMING_SNAKE_CASE = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(a__ ):
            __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            __SCREAMING_SNAKE_CASE = """param"""
    if weight_type is not None and weight_type != "param":
        __SCREAMING_SNAKE_CASE = """.""".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        __SCREAMING_SNAKE_CASE = """.""".join([key, hf_param_name] )
    else:
        __SCREAMING_SNAKE_CASE = key
    # lm_head weights are stored whole; other params keep only their first slice.
    __SCREAMING_SNAKE_CASE = value if """lm_head""" in full_key else value[0]
# Adapter parameter name (fairseq) -> HF adapter-layer attribute path.
# NOTE(review): referenced below as PARAM_MAPPING but bound to the clobbered
# name `UpperCAmelCase` here — confirm against the original script.
UpperCAmelCase : Any = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def a__ ( a__ , a__=None , a__=None , a__=None ):
    """Match one fairseq parameter name against MAPPING and load it into the HF model
    or a flat dict; returns True when the parameter was used (load_wav2vec2_layer-style).

    NOTE(review): broken by an automated rename — the original signature repeats the
    parameter name ``a__`` (a SyntaxError; left as-is below), and the body reads
    undefined names (``name``, ``hf_dict``, ``is_used``, ``MAPPING``,
    ``TOP_LEVEL_KEYS``, ``rename_dict``, ``set_recursively``); confirm against the
    original conversion script.
    """
    __SCREAMING_SNAKE_CASE = False
    for key, mapped_key in MAPPING.items():
        # Top-level attributes keep their name; everything else lives under 'wav2vec2.'.
        __SCREAMING_SNAKE_CASE = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
            __SCREAMING_SNAKE_CASE = True
            if "*" in mapped_key:
                # Substitute the encoder layer index into the mapped key.
                __SCREAMING_SNAKE_CASE = name.split(a__ )[0].split(""".""" )[-2]
                __SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , a__ )
            if "weight_g" in name:
                __SCREAMING_SNAKE_CASE = """weight_g"""
            elif "weight_v" in name:
                __SCREAMING_SNAKE_CASE = """weight_v"""
            elif "bias" in name:
                __SCREAMING_SNAKE_CASE = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                __SCREAMING_SNAKE_CASE = """weight"""
            else:
                __SCREAMING_SNAKE_CASE = None
            if hf_dict is not None:
                rename_dict(a__ , a__ , a__ , a__ , a__ )
            else:
                set_recursively(a__ , a__ , a__ , a__ , a__ )
            return is_used
    return is_used
def a__ ( a__ , a__ , a__ ):
    """Walk the fairseq state dict and load every tensor into the HF model,
    warning about any weights that were not consumed (recursively_load_weights-style).

    NOTE(review): broken by an automated rename — three parameters share the name
    ``a__`` (a SyntaxError), and the body reads undefined names (``fairseq_model``,
    ``hf_model``, ``is_used``, ``unused_weights``, ``load_conv_layer``,
    ``load_wavaveca_layer``); confirm against the original conversion script.
    """
    __SCREAMING_SNAKE_CASE = []
    __SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
    __SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        __SCREAMING_SNAKE_CASE = False
        if "conv_layers" in name:
            # Feature-extractor conv layers are handled by a dedicated loader.
            load_conv_layer(
                a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == """group""" , )
            __SCREAMING_SNAKE_CASE = True
        else:
            __SCREAMING_SNAKE_CASE = load_wavaveca_layer(a__ , a__ , a__ )
        if not is_used:
            unused_weights.append(a__ )
    logger.warning(F'Unused weights: {unused_weights}' )
def a__ ( a__ , a__ , a__ , a__ , a__ ):
    """Load one fairseq feature-extractor conv-layer tensor (conv weight/bias or
    layer-norm weight/bias) into the HF feature extractor (load_conv_layer-style).

    NOTE(review): broken by an automated rename — five parameters share the name
    ``a__`` (a SyntaxError), and the body reads undefined names (``full_name``,
    ``name``, ``value``, ``feature_extractor``, ``use_group_norm``,
    ``unused_weights``). Also: the log message in the layer-norm *bias* branch says
    "layer norm weight" — presumably it should say "bias"; confirm before changing
    the string.
    """
    __SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
    __SCREAMING_SNAKE_CASE = name.split(""".""" )
    __SCREAMING_SNAKE_CASE = int(items[0] )
    __SCREAMING_SNAKE_CASE = int(items[1] )
    # type_id 0: conv weight/bias; type_id 2: layer-norm weight/bias (per fairseq layout).
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            __SCREAMING_SNAKE_CASE = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            __SCREAMING_SNAKE_CASE = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            __SCREAMING_SNAKE_CASE = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            __SCREAMING_SNAKE_CASE = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(a__ )
@torch.no_grad()
def a__ ( a__ , a__ , a__=None , a__=None , a__=True , a__=False ):
    """Convert a fairseq wav2vec2 checkpoint to HF format and save it
    (convert_wav2vec2_checkpoint-style): builds the config, the right model head
    (sequence classification / CTC / pretraining), the tokenizer+feature-extractor
    processor when fine-tuned, loads the fairseq weights, and saves everything.

    NOTE(review): broken by an automated rename — the parameters share the name
    ``a__`` (a SyntaxError), and the body reads undefined names (``config_path``,
    ``is_seq_class``, ``is_finetuned``, ``dict_path``, ``checkpoint_path``,
    ``target_dict``, ``config``, ``feature_extractor``, ``processor``, ``model``,
    ``hf_wavavec``, ``read_txt_into_dict``, ``recursively_load_weights``);
    confirm against the original conversion script.
    """
    if config_path is not None:
        __SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(a__ )
    else:
        __SCREAMING_SNAKE_CASE = WavaVecaConfig()
    if is_seq_class:
        # Sequence-classification head: id->label map comes from a txt file.
        __SCREAMING_SNAKE_CASE = read_txt_into_dict(a__ )
        __SCREAMING_SNAKE_CASE = idalabel
        __SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(a__ )
        __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
        feature_extractor.save_pretrained(a__ )
    elif is_finetuned:
        if dict_path:
            __SCREAMING_SNAKE_CASE = Dictionary.load(a__ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __SCREAMING_SNAKE_CASE = target_dict.pad_index
            __SCREAMING_SNAKE_CASE = target_dict.bos_index
            __SCREAMING_SNAKE_CASE = target_dict.eos_index
            __SCREAMING_SNAKE_CASE = len(target_dict.symbols )
            __SCREAMING_SNAKE_CASE = os.path.join(a__ , """vocab.json""" )
            if not os.path.isdir(a__ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(a__ ) )
                return
            os.makedirs(a__ , exist_ok=a__ )
            __SCREAMING_SNAKE_CASE = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __SCREAMING_SNAKE_CASE = 0
            __SCREAMING_SNAKE_CASE = 1
            with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(a__ , a__ )
            __SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
                a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=a__ , )
            __SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
            __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
            __SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
            processor.save_pretrained(a__ )
        __SCREAMING_SNAKE_CASE = WavaVecaForCTC(a__ )
    else:
        __SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(a__ )
    # Load the fairseq checkpoint (with the dict's data dir for fine-tuned models).
    if is_finetuned or is_seq_class:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __SCREAMING_SNAKE_CASE = argparse.Namespace(task="""audio_pretraining""" )
        __SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(a__ )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a__ )
    __SCREAMING_SNAKE_CASE = model[0].eval()
    recursively_load_weights(a__ , a__ , not is_finetuned )
    hf_wavavec.save_pretrained(a__ )
# Command-line entry point for the wav2vec2 conversion script.
# NOTE(review): an automated rename bound the parser/args/flag to `UpperCAmelCase`,
# so `parser`, `args`, `is_finetuned` and `convert_wavaveca_checkpoint` are
# undefined here — confirm the intended names against the original script.
if __name__ == "__main__":
    UpperCAmelCase : int = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    parser.add_argument(
        '--is_seq_class',
        action='store_true',
        help='Whether the model to convert is a fine-tuned sequence classification model or not',
    )
    UpperCAmelCase : Optional[Any] = parser.parse_args()
    # A model is treated as fine-tuned unless --not_finetuned or --is_seq_class is given.
    UpperCAmelCase : int = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 627 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Hub ids of the four Stable Diffusion v1.x checkpoints compared by the pipeline below.
# NOTE(review): all four are bound to the clobbered name `a` (each assignment
# overwriting the last) — presumably these were four distinct constants originally.
a : Union[str, Any] = '''CompVis/stable-diffusion-v1-1'''
a : List[Any] = '''CompVis/stable-diffusion-v1-2'''
a : List[str] = '''CompVis/stable-diffusion-v1-3'''
a : Tuple = '''CompVis/stable-diffusion-v1-4'''
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
    """Comparison pipeline: runs the same prompt through the four Stable Diffusion v1.x
    checkpoints and returns all four images in one StableDiffusionPipelineOutput.

    NOTE(review): this class is broken by an automated rename and cannot run as-is:
    (1) ``super()._init_()`` should be the dunder ``super().__init__()``;
    (2) ``__init__`` repeats the parameter name ``a_`` and ``register_modules`` repeats
        the keyword ``pipelinea`` — both are SyntaxErrors in Python;
    (3) the bodies reference an undefined free name ``SCREAMING_SNAKE_CASE_`` and
        assign every local to ``__snake_case``.
    Confirm the intended distinct names against the original community pipeline.
    """
    def __init__( self : List[str] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : List[str] , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : int = True , ):
        """Build the four sub-pipelines (three from the hub, one from the given components)."""
        super()._init_()
        __snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
        __snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
        __snake_case = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
        __snake_case = StableDiffusionPipeline(
            vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def A ( self : List[Any] ):
        """Return the pipeline config entries that are not private (no leading underscore)."""
        return {k: getattr(self , SCREAMING_SNAKE_CASE_ ) for k in self.config.keys() if not k.startswith("_" )}
    def A ( self : str , a_ : List[str] = "auto" ):
        """Enable sliced attention; 'auto' uses half the attention head dim as slice size."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
    def A ( self : int ):
        """Disable attention slicing (delegates with a sentinel slice size)."""
        self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
    @torch.no_grad()
    def A ( self : Tuple , a_ : Dict , a_ : Optional[Any] = 512 , a_ : Union[str, Any] = 512 , a_ : int = 50 , a_ : Any = 7.5 , a_ : List[Any] = None , a_ : int = 1 , a_ : Optional[Any] = 0.0 , a_ : Tuple = None , a_ : Union[str, Any] = None , a_ : Any = "pil" , a_ : str = True , a_ : Union[str, Any] = None , a_ : List[str] = 1 , **a_ : Union[str, Any] , ):
        """Run text-to-image on sub-pipeline 1 (v1.1), forwarding all generation kwargs."""
        return self.pipea(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
    @torch.no_grad()
    def A ( self : List[str] , a_ : Optional[Any] , a_ : Optional[int] = 512 , a_ : Tuple = 512 , a_ : Union[str, Any] = 50 , a_ : Tuple = 7.5 , a_ : Optional[int] = None , a_ : Optional[Any] = 1 , a_ : List[Any] = 0.0 , a_ : Any = None , a_ : List[Any] = None , a_ : Union[str, Any] = "pil" , a_ : Dict = True , a_ : Any = None , a_ : List[Any] = 1 , **a_ : int , ):
        """Run text-to-image on sub-pipeline 2 (v1.2), forwarding all generation kwargs."""
        return self.pipea(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
    @torch.no_grad()
    def A ( self : Union[str, Any] , a_ : Any , a_ : int = 512 , a_ : Tuple = 512 , a_ : Optional[int] = 50 , a_ : Dict = 7.5 , a_ : Optional[Any] = None , a_ : int = 1 , a_ : Any = 0.0 , a_ : Optional[int] = None , a_ : str = None , a_ : Tuple = "pil" , a_ : int = True , a_ : Tuple = None , a_ : Dict = 1 , **a_ : List[Any] , ):
        """Run text-to-image on sub-pipeline 3 (v1.3), forwarding all generation kwargs."""
        return self.pipea(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
    @torch.no_grad()
    def A ( self : List[Any] , a_ : str , a_ : Union[str, Any] = 512 , a_ : str = 512 , a_ : Dict = 50 , a_ : str = 7.5 , a_ : Optional[Any] = None , a_ : List[str] = 1 , a_ : Union[str, Any] = 0.0 , a_ : List[Any] = None , a_ : Dict = None , a_ : List[str] = "pil" , a_ : str = True , a_ : Dict = None , a_ : str = 1 , **a_ : Optional[Any] , ):
        """Run text-to-image on sub-pipeline 4 (v1.4), forwarding all generation kwargs."""
        return self.pipea(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
    @torch.no_grad()
    def A ( self : Any , a_ : Union[str, Any] , a_ : str = 512 , a_ : Dict = 512 , a_ : Union[str, Any] = 50 , a_ : Tuple = 7.5 , a_ : Optional[int] = None , a_ : Union[str, Any] = 1 , a_ : List[str] = 0.0 , a_ : Tuple = None , a_ : Optional[Any] = None , a_ : List[str] = "pil" , a_ : List[Any] = True , a_ : Union[str, Any] = None , a_ : Union[str, Any] = 1 , **a_ : Dict , ):
        """Run all four checkpoints on the same prompt and bundle the first image of each."""
        __snake_case = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(SCREAMING_SNAKE_CASE_ )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
        # Get first result from Stable Diffusion Checkpoint v1.1
        __snake_case = self.textaimg_sda_a(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        __snake_case = self.textaimg_sda_a(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        __snake_case = self.textaimg_sda_a(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        __snake_case = self.textaimg_sda_a(
            prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """BertEncoder that can be driven one layer at a time, as PABEE needs for early exit."""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """Run only layer ``current_layer`` of the stack and return its hidden states.

        The mangled original had four parameters all named ``a_`` (a SyntaxError) and
        referenced the names used here; the caller at `self.encoder.adaptive_forward(...)`
        fixes the keyword names ``current_layer``/``attention_mask``/``head_mask``.
        """
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        # BertLayer returns a tuple; element 0 is the layer's hidden states.
        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """
    BertModel with PABEE (Patience-based Early Exit): at inference time, per-layer
    classifier heads are evaluated after each encoder layer, and the forward pass
    stops once `patience` consecutive layers agree on the prediction.
    """

    def __init__(self, config):
        super().__init__(config)

        # Replace the stock encoder with one that exposes a per-layer forward.
        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        # Early-exit configuration and bookkeeping (see set_patience / log_stats).
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the |prediction delta| under which two consecutive layers 'agree' (regression tasks)."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing layers trigger an early exit (0 = always use all layers)."""
        self.patience = patience

    def reset_stats(self):
        """Reset the counters used to report average layers run per inference instance."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """Print the average number of layers used at inference and the implied speed-up."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        """Run BERT with PABEE.

        `output_layers` are the per-layer classification heads and `output_dropout`
        the dropout applied before them; `regression` switches the layer-agreement
        test from argmax equality to an absolute-difference threshold.
        Returns a list of logits (one per executed layer during training, a single
        entry at inference time).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            # Training: run every layer and collect logits from each head so all
            # exits are supervised.
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            # Patience-based early exit: stop once `patience` consecutive layers agree.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """Sequence classifier with one linear head per encoder layer, trained for PABEE early exit."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per layer so every exit point can predict.
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Return `(loss, last_logits)` when `labels` is given, else `(last_logits,)`.

        The per-layer losses are averaged with weights 1..L so deeper (more
        accurate) exits contribute more to the training signal.
        """
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 680 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n):
    """Return the set of distinct prime factors of *n* via trial division.

    The mangled original referenced undefined names (``i``, ``n``, ``factors``)
    and added the wrong value to the set; the caller below uses the name
    ``unique_prime_factors``.
    """
    i = 2
    factors = set()
    # Trial-divide by every candidate up to sqrt(n); dividing each prime out
    # as it is found keeps the bound shrinking.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        # Whatever remains after removing all factors <= sqrt(n) is itself prime.
        factors.add(n)
    return factors
@lru_cache
def upf_len(num):
    """Memoised count of the distinct prime factors of *num* (name restored to match its caller)."""
    return len(unique_prime_factors(num))
def equality(iterable):
    """Return True when every element of *iterable* is equal (vacuously True when empty)."""
    return len(set(iterable)) in (0, 1)
def run(n):
    """Find the first run of *n* consecutive integers each having exactly *n*
    distinct prime factors (Project Euler 47) and return that run as a list.

    The mangled original referenced undefined ``base``/``checker`` and called
    ``upf_len`` on the wrong argument; restored here.
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1
def solution(n=4):
    """Return the first of *n* consecutive integers that each have *n* distinct
    prime factors, or None when the search returns nothing.

    The original called ``len()`` on the integer argument (a TypeError); it must
    inspect the result list instead.
    """
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
| 260 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    Lean wrapper over a DeepSpeed config dict/file/base64-string that answers
    simple queries about it (ZeRO stage, offload, arbitrary dotted-key lookups).
    """

    def __init__(self, config_file_or_dict):
        """Accept a dict, a path to a JSON file, or a base64-encoded JSON string."""
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                # Local import: the module header only pulls in the (mangled) `baseaa` name.
                import base64

                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        """Cache the ZeRO stage and whether cpu/nvme offload is enabled."""
        # zero stage - this is done as early as possible so is_zero2/is_zero3 queries work.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Walk the dotted key; return (parent dict, leaf key) or (None, leaf key) if absent."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted key, or *default* when any path segment is missing."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Delete the sub-tree at the dotted key; optionally raise when it does not exist."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """True only when the key exists and its value is truthy."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """True only when the key exists and its value is falsy."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    """
    Thin wrapper around a DeepSpeed engine: ``backward`` runs the engine's
    backward pass immediately followed by ``engine.step()``.
    """

    def __init__(self, engine):
        # The live DeepSpeed engine whose backward/step drive the whole optimization step.
        self.engine = engine

    def backward(self, loss, **kwargs):
        """Backpropagate *loss* through the engine, then advance one optimizer step."""
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """
    AcceleratedOptimizer whose zero_grad/step are no-ops: DeepSpeed performs both
    inside ``engine.step()``, driven from ``accelerator.backward(loss)``.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        # Only DeepSpeed's fp16 optimizers expose an `overflow` attribute.
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether the last optimizer step was skipped because of fp16 gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """
    AcceleratedScheduler used when the LR schedule is advanced by the DeepSpeed
    engine itself, so ``step`` here is a no-op.
    """

    def __init__(self, scheduler, optimizers):
        # Parent class stores the wrapped scheduler and its optimizers.
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """
    Placeholder optimizer that only records its constructor arguments, used when
    the real optimizer is specified in the DeepSpeed config file.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        # Keep any extra optimizer settings verbatim for later hand-off.
        self.kwargs = kwargs
class DummyScheduler:
    """
    Placeholder LR scheduler that only records its constructor arguments, used
    when the real scheduler is specified in the DeepSpeed config file.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        # Keep any extra scheduler settings verbatim for later hand-off.
        self.kwargs = kwargs
| 260 | 1 |
def snake_case (__lowercase ) -> int:
    """Return the number of set bits (1s) in the binary representation of a
    non-negative integer.

    Raises ValueError for negative input and TypeError for floats. The original
    compared an undefined name ``a`` and called ``isinstance(x, x)`` (always a
    TypeError); both checks are restored here.
    """
    if __lowercase < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(__lowercase, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(__lowercase).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod() | 580 | import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds small TimmBackbone configs/inputs for the test class below (which
    instantiates it as ``TimmBackboneModelTester(self)``)."""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to taking features from the last stage only.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair with random image-shaped floats."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Instantiate the backbone and check the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Tests for the timm-backbone wrapper; most common-model tests do not apply
    to timm checkpoints and are explicitly skipped with a reason."""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a__ : List[str] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings of the given *targets* types from a downloaded CI
    artifact (a .zip, or a directory when running from a GitHub Action).

    Name restored to match the caller in ``extract_warnings``; the original had
    duplicate parameter names (a SyntaxError).
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Warnings span multiple indented lines; accumulate them in `buffer`
        # until a non-indented line marks the end of the current warning.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # NOTE(review): `from_gh` is a module-level global assigned in the
    # `__main__` block below — confirm before reusing this function elsewhere.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Collect warnings of the given *targets* types from every artifact found
    in *artifact_dir* (name and parameters restored; the original had duplicate
    parameter names, a SyntaxError)."""
    selected_warnings = set()
    # Zip artifacts when downloaded via the API; bare directories when `from_gh`.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
def _lowercase ( __A ):
'''simple docstring'''
return values.split(""",""" )
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
a__ : str = parser.parse_args()
a__ : List[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a__ : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a__ : Dict = extract_warnings(args.output_dir, args.targets)
a__ : List[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 601 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that consumes each undirected edge exactly once,
    returning the Euler path/circuit starting at vertex *u*.

    The original had four parameters all named ``__A`` (a SyntaxError); names
    restored to match the caller in ``check_euler``.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge as used in both directions before recursing.
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify *graph* by odd-degree vertex count.

    Returns (1, -1) when an Euler circuit exists (no odd-degree vertices),
    (2, odd_node) when an Euler path exists (exactly two), and (3, odd_node)
    otherwise. Parameter names restored (the original duplicated ``__A``).
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether *graph* has an Euler circuit/path and, when it does, print
    one traversal found by ``dfs`` (names restored; original duplicated ``__A``)."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    # An Euler circuit may start anywhere; an Euler path must start at an
    # odd-degree vertex.
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Demo entry point: run the Euler check on a few sample graphs
    (name restored — the guard below calls ``main()``)."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 601 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCAmelCase_ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class UpperCAmelCase_ ( __lowerCamelCase ):
    """
    Tool that translates text between languages with the NLLB-200 model.

    The three `encode`/`forward`/`decode` stages follow the Tool pipeline
    contract: `encode` builds tokenized inputs, `forward` runs generation,
    `decode` turns generated ids back into text.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    # Maps plain-English language names to NLLB language codes.
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        """Validate the plain-English language names and build tokenized translation inputs."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )

    def forward(self, inputs):
        """Run sequence-to-sequence generation on the tokenized inputs."""
        return self.model.generate(**inputs )

    def decode(self, outputs):
        """Decode the first generated sequence back to plain text."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
| 714 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x + y + z as a reduced (numerator, denominator) pair."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    For all fractions x = x_num/x_den and y = y_num/y_den with
    numerator < denominator <= `order`, look for a fraction z with
    x^n + y^n = z^n for n in {1, 2, -1, -2} whose (reduced) numerator and
    denominator also satisfy 0 < z_num < z_den <= order. Collect the unique
    reduced sums x + y + z and return numerator + denominator of their total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2: z^2 = x^2 + y^2, so z exists only when both squares are perfect
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1: 1/x + 1/y = 1/z, so z = xy / (x + y)
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2: 1/x^2 + 1/y^2 = 1/z^2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 435 | 0 |
# Lazy-import scaffolding for the Chinese-CLIP model family: submodules are
# imported only on first attribute access (or eagerly under TYPE_CHECKING).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-dependent components are only exported when vision deps exist.
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import os


def solution() -> int:
    """
    Return the greatest product of four adjacent numbers (right, down, and
    both diagonals) in the 20x20 grid read from ``grid.txt`` next to this file.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
| 629 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    Return the value of the Gaussian (normal) probability density function
    with mean ``mu`` and standard deviation ``sigma`` evaluated at ``x``.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """
    Convert a Transformer-XL TF checkpoint and/or a pre-processed corpus
    pickle into PyTorch artifacts saved under `pytorch_dump_folder_path`.
    Either input may be empty; each branch is independent.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb' ) as fp:
            corpus = pickle.load(fp, encoding='latin1' )

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path )

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F"Save dataset to {pytorch_dataset_dump_path}" )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path )

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )

        print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"Building PyTorch model from configuration: {config}" )
        model = TransfoXLLMHeadModel(config )

        model = load_tf_weights_in_transfo_xl(model, config, tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
        print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
        torch.save(model.state_dict(), pytorch_weights_dump_path )
        print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
        with open(pytorch_config_dump_path, 'w', encoding='utf-8' ) as f:
            f.write(config.to_json_string() )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""",
        default="""""",
        type=str,
        help="""An optional path to a TensorFlow checkpoint path to be converted.""",
    )
    parser.add_argument(
        """--transfo_xl_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--transfo_xl_dataset_file""",
        default="""""",
        type=str,
        help="""An optional dataset file to be converted in a vocabulary.""",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion utilities below.
logger = logging.get_logger(__name__)
# (parlai fragment, huggingface fragment) rename pairs, applied in order.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI Blenderbot state-dict key to the corresponding HF key."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''', '''.self_attn''' )
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''', '''final_layer_norm''' )
    return k
def rename_layernorm_keys(sd):
    """
    In-place rename of the four `layernorm_embedding` entries of state dict
    `sd` to the `layer_norm` names used by the Blenderbot-3B layout.
    """
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''' )
        assert new_k not in sd  # the target name must not already exist
        sd[new_k] = v


# ParlAI bookkeeping entries that have no HF counterpart.
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Load a ParlAI Blenderbot checkpoint, remap its state-dict keys to the HF
    layout, and save a half-precision BlenderbotForConditionalGeneration.
    """
    model = torch.load(checkpoint_path, map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []  # keys that could not be mapped (kept for debugging)
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # The renamed keys are HF-style names, so they live in `mapping`.
        rename_layernorm_keys(mapping )
    m.model.load_state_dict(mapping, strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
    parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
    parser.add_argument(
        """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product over all contiguous subarrays of `numbers`.

    Returns 0 for an empty input; raises ValueError when `numbers` is not a
    list/tuple of integers. Tracks both the running maximum and minimum
    product, swapping them on a negative element (a negative number turns the
    smallest product into the largest candidate).
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple) ) or not all(
        isinstance(number, int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number )
        min_till_now = min(number, min_till_now * number )

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now )

    return max_prod
| 175 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of spectrogram frames processed per generated segment.
TARGET_FEATURE_LENGTH = 256
class UpperCamelCase_ ( DiffusionPipeline ):
    """
    Spectrogram-diffusion pipeline: encodes note tokens plus the previous
    spectrogram chunk, denoises a new chunk with the DDPM scheduler, and
    (optionally) vocodes the full mel spectrogram with MelGAN.
    """

    # MelGAN is only required when decoding to audio ("numpy" output).
    _optional_components = ['''melgan''']

    def __init__( self, notes_encoder, continuous_encoder, decoder, scheduler, melgan ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features( self, features, output_range=(-1.0, 1.0), clip=False ):
        """Linearly scale `features` from [min_value, max_value] to `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features( self, outputs, input_range=(-1.0, 1.0), clip=False ):
        """Invert `scale_features`: map network outputs back to the feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode( self, input_tokens, continuous_inputs, continuous_mask ):
        """Run both encoders; returns [(tokens_enc, tokens_mask), (cont_enc, cont_mask)]."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode( self, encodings_and_masks, input_tokens, noise_time ):
        """Predict denoising logits for the current diffusion step."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device )

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate a mel spectrogram (and optionally audio) segment-by-segment."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps )}.' )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device )

        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device, dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator ).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel )

            logger.info('''Generated segment''', i )

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output )
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Hub locations of the pretrained ESM-2 vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

# Maximum input lengths of the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1_024,
    'facebook/esm2_t12_35M_UR50D': 1_024,
}
def load_vocab_file(vocab_file: str) -> List[str]:
    """Read a vocab file (one token per line) and return the stripped tokens."""
    with open(vocab_file, '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class UpperCamelCase_ ( PreTrainedTokenizer ):
    """
    ESM tokenizer: whitespace tokenization over a fixed per-residue
    vocabulary loaded from `vocab_file`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs ) -> None:
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # Every vocabulary token must never be split by the trie tokenizer.
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token( self, index: int ) -> str:
        return self._id_to_token.get(index, self.unk_token )

    def _convert_token_to_id( self, token: str ) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )

    def _tokenize( self, text, **kwargs ):
        # Amino-acid tokens are single characters or pre-spaced; whitespace split suffices.
        return text.split()

    def get_vocab_size( self, with_added_tokens=False ) -> int:
        return len(self._id_to_token )

    def get_vocab( self ) -> dict:
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id( self, token: str ) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token ) )

    def id_to_token( self, index: int ) -> str:
        return self._id_to_token.get(index, self.unk_token )

    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_a_a = None ) -> List[int]:
        """Build model inputs as <cls> seq (<eos>) or <cls> seq <eos> seq2 <eos>."""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_a + sep + token_ids_a_a + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask( self, token_ids_a, token_ids_a_a = None, already_has_special_tokens = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_a_a is not None:
            mask += [0] * len(token_ids_a_a ) + [1]
        return mask

    def save_vocabulary( self, save_directory, filename_prefix ) -> Tuple:
        vocab_file = os.path.join(save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file, '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size( self ) -> int:
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens( self, new_tokens, special_tokens=False ) -> int:
        # All ESM tokens are registered as special so they are never split.
        return super()._add_tokens(new_tokens, special_tokens=True )
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
# Hub locations of the pretrained Autoformer configurations.
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class a_ ( PretrainedConfig ):
    """
    Configuration for an Autoformer time-series forecasting model: holds the
    series-specific settings (lags, static/dynamic features, distribution
    head) and the transformer architecture hyper-parameters.
    """

    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # The encoder context defaults to the prediction horizon.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            # Default rule-of-thumb embedding size per categorical feature.
            self.embedding_dimension = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def _number_of_features( self ) -> int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 598 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning registry so each test re-triggers the warning."""
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() )


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch the Hub client so listing metrics requires no network access."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() )


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point must emit the evaluate migration warning."""
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 598 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Return x + 2 (toy tool used by the interpreter tests below)."""
    return x + 2
class lowerCamelCase ( unittest.TestCase ):
    """Tests for the tools Python-interpreter `evaluate` function.

    Fixes from review: in the original, every local variable in every method was
    named `_a` (each assignment shadowed the previous one), the calls referenced
    an undefined `__snake_case`, the tool dict referenced an undefined `add_two`
    (the helper above is named `lowerCamelCase_`), and all methods shared one
    name so only the last one survived on the class.
    """

    def test_evaluate_assign(self):
        """Assignment statements update state and yield the assigned value."""
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        """Tool calls resolve through the provided tools mapping."""
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowerCamelCase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool.
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        """A constant assignment evaluates to the constant."""
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        """Dict literals may mix state lookups and tool calls."""
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowerCamelCase_}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        """Multiple statements evaluate to the last assignment's value."""
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        """F-strings are interpolated from the interpreter state."""
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        """Both branches of an if/else are reachable depending on state."""
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        """List literals may mix state lookups and tool calls."""
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowerCamelCase_}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        """A bare name assignment copies the looked-up value."""
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        """Subscripting works on both lists and dicts built in-interpreter."""
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowerCamelCase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowerCamelCase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        """For loops execute with the provided `range` tool; loop var persists."""
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 249 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the TimeSformer model package.
# Fixes from review: everything was bound to one scrambled name, so the
# `_import_structure` read by `_LazyModule` at the bottom was undefined, the
# torch symbol list *replaced* the dict instead of extending it, and the
# `_LazyModule` instance was assigned to a throwaway variable instead of
# replacing the module in `sys.modules`.
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed model classes are only exported when torch is installed.
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports
    # above only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249 | 1 |
from ...configuration_utils import PretrainedConfig
# Map of canonical TAPAS checkpoints to the URLs of their hosted config files.
A : Optional[int] = {
    'google/tapas-base-finetuned-sqa': (
        'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wtq': (
        'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wikisql-supervised': (
        'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-tabfact': (
        'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
    ),
}
class A ( PretrainedConfig ):
    """TAPAS model configuration.

    Combines the BERT-style encoder hyperparameters (with TAPAS-specific
    `max_position_embeddings` and per-column `type_vocab_sizes`) with the
    fine-tuning hyperparameters of the cell-selection / aggregation heads.

    Fixes from review: every `__init__` parameter shared a single scrambled
    name (a SyntaxError), the base class was an undefined name (the only
    import above is `PretrainedConfig`), assignments dropped their `self.`
    targets (the code later reads `self.aggregation_labels`), and the
    key conversion used `int(<wrong name>)` instead of `int(k)`.
    """

    model_type = "tapas"  # registry key; reconstructed from the scrambled `A__` attribute

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            # JSON round-trips turn int keys into strings; restore int keys.
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 15 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# Module-level pytest mark: pytest only honors this under the name `pytestmark`.
pytestmark = pytest.mark.integration

# Names below are read by the skip decorators defined right after this block
# (`_has_fairseq`/`REQUIRE_FAIRSEQ`, `_has_transformers`/`REQUIRE_TRANSFORMERS`,
# `_on_windows`/`UNSUPPORTED_ON_WINDOWS`); the original bound every value to one
# shadowed scrambled name, leaving all of them undefined.
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    """Decorator: skip `test_case(self, metric_name)` when the metric needs fairseq and it is absent.

    Fix from review: the wrapper's parameter and the wrapped function shared
    scrambled names, so `test_case` and `metric_name` (both read in the body)
    were undefined.
    """

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    """Decorator: skip `test_case(self, metric_name)` when the metric needs transformers and it is absent.

    Fix from review: `test_case` and `metric_name` were undefined in the
    original (scrambled parameter names).
    """

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    """Decorator: skip `test_case(self, metric_name)` for metrics unsupported on Windows.

    Fix from review: `test_case` and `metric_name` were undefined in the
    original (scrambled parameter names).
    """

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    """Discover local metrics under ./metrics and format them for `parameterized`.

    Fix from review: this function is called as `get_local_metric_names()` by
    the class decorator right below, but was defined under a scrambled name.
    """
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# NOTE(review): the three arguments above are scrambled placeholders — presumably
# the fairseq/transformers/windows skip decorators defined earlier; confirm.
@local
class lowercase ( parameterized.TestCase ):
    """Doctest every local metric under ./metrics, patching model-heavy calls."""
    # Registry of per-metric patchers; the second assignment overwrites the first,
    # which looks like a scrambling artifact (originally two distinct attributes).
    __a = {}
    __a = None
    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
        """Import a local metric module and run its docstring examples via doctest, with intensive calls patched."""
        lowerCAmelCase__ : int = '''[...]'''
        lowerCAmelCase__ : Optional[Any] = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path )
        lowerCAmelCase__ : Tuple = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE__ )
        # check parameters
        lowerCAmelCase__ : Tuple = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(SCREAMING_SNAKE_CASE__ , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    lowerCAmelCase__ : Dict = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
        """Slow variant: doctest the metric for real, without patching intensive calls."""
        lowerCAmelCase__ : Optional[Any] = '''[...]'''
        lowerCAmelCase__ : List[str] = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            lowerCAmelCase__ : str = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        """Apply the registered intensive-calls patcher for this metric, if any; otherwise no-op."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE__ ):
                yield
        else:
            yield
    @contextmanager
    def lowercase_ ( self ):
        """Redirect `datasets.load_metric` to load metrics from the local ./metrics folder."""
        def load_local_metric(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
            return load_metric(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
        with patch('''datasets.load_metric''' ) as mock_load_metric:
            lowerCAmelCase__ : Optional[Any] = load_local_metric
            yield
    @classmethod
    def lowercase_ ( cls , SCREAMING_SNAKE_CASE__ ):
        """Decorator factory: register a context-manager patcher for `metric_name`."""
        def wrapper(SCREAMING_SNAKE_CASE__ ):
            lowerCAmelCase__ : Any = contextmanager(SCREAMING_SNAKE_CASE__ )
            lowerCAmelCase__ : Tuple = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def _a ( __UpperCamelCase : int ):
    # Patch bleurt so its doctest runs without downloading/running a real TF model.
    import tensorflow.compat.va as tf  # NOTE(review): 'compat.va' looks like a typo for 'compat.v1' — confirm
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags
    class lowercase ( __UpperCamelCase ):
        def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
            """Return fixed scores instead of running the BLEURT model."""
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    # NOTE(review): `MockedPredictor` and `input_dict` are undefined as written, the
    # class base is this function's parameter, and the mock attribute assignment is
    # scrambled — confirm against the original test (likely
    # `mock_create_predictor.return_value = MockedPredictor()`).
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        lowerCAmelCase__ : List[str] = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def _a ( __UpperCamelCase : str ):
    # Patch bert_score so its doctest runs without downloading a BERT model.
    import torch
    def bert_cos_score_idf(__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,*__UpperCamelCase : str ,**__UpperCamelCase : List[Any] ):
        # Constant (P, R, F) scores per sentence, matching bert_score's output shape.
        return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    # NOTE(review): the assignment below is scrambled — presumably it set
    # `mock_bert_cos_score_idf.side_effect = bert_cos_score_idf`; confirm.
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        lowerCAmelCase__ : List[str] = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def _a ( __UpperCamelCase : Tuple ):
    # Patch comet so its doctest runs without downloading a model checkpoint.
    def load_from_checkpoint(__UpperCamelCase : Any ):
        class lowercase :
            def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
                """Return fixed per-sample scores and their mean instead of running COMET."""
                assert len(SCREAMING_SNAKE_CASE__ ) == 2
                lowerCAmelCase__ : Dict = [0.19, 0.92]
                # NOTE(review): `scores` below is undefined as written — the
                # assignment above appears to have lost its target name; confirm.
                return scores, sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    # NOTE(review): `Model` is undefined (inner class is named `lowercase`) and the
    # two mock assignments below are scrambled — presumably they set the mocks'
    # return_value/side_effect; confirm against the original test.
    with patch('''comet.download_model''' ) as mock_download_model:
        lowerCAmelCase__ : str = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            lowerCAmelCase__ : Optional[Any] = load_from_checkpoint
            yield
def test_seqeval_wrong_scheme_raises():
    """seqeval must reject an unknown tagging scheme with a descriptive error.

    Fix from review: the original referenced undefined names for the scheme
    variable, the expected-message f-string input, and the exception class.
    NOTE(review): `ValueError` is reconstructed — confirm the exact exception
    type raised by the seqeval metric.
    """
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 233 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of classification labels per GLUE task; read by the conversion
# function below as `GLUE_TASKS_NUM_LABELS` (the original bound it to a
# scrambled name, leaving that lookup undefined).
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model + config on disk.

    Fixes from review: the original declared four parameters with one shared
    name (a SyntaxError), referenced an undefined `__snake_case` throughout,
    and was defined under a name different from the one the `__main__` block
    calls (`convert_xlnet_checkpoint_to_pytorch`).

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: path to the XLNet config JSON.
        pytorch_dump_folder_path: output folder for weights and config.
        finetuning_task: optional task name selecting the model head.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point. Fix from review: the parser and parsed args were bound to
    # one scrambled name, so `parser.add_argument(...)` and `print(args)` below
    # referenced undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 707 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase = logging.get_logger(__name__)
class __magic_name__ ( lowercase_ ):
    """Whisper-style log-mel spectrogram feature extractor.

    Pads/truncates raw mono audio to fixed-length chunks and converts each
    chunk to log-mel features; optionally applies zero-mean/unit-variance
    normalization before feature extraction.

    NOTE(review): throughout this class, assignments bind to scrambled local
    names (e.g. ``_lowerCamelCase = n_fft``) where instance-attribute
    assignments were clearly intended — attributes such as ``self.n_fft`` are
    read later but never set as written. Confirm against the original.
    """
    # Presumably the model input names list (scrambled attribute name).
    _UpperCamelCase = ["input_features"]
    def __init__( self , a__=80 , a__=1_60_00 , a__=1_60 , a__=30 , a__=4_00 , a__=0.0 , a__=False , **a__ , ):
        # NOTE(review): all parameters share the scrambled name `a__` (invalid as
        # written); the body reads n_fft, hop_length, chunk_length, sampling_rate —
        # confirm the original parameter list.
        super().__init__(
            feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
        _lowerCamelCase = n_fft
        _lowerCamelCase = hop_length
        _lowerCamelCase = chunk_length
        # Samples per chunk, and the resulting number of feature frames.
        _lowerCamelCase = chunk_length * sampling_rate
        _lowerCamelCase = self.n_samples // hop_length
        _lowerCamelCase = sampling_rate
        # Slaney-normalized mel filter bank covering 0.0–8000.0 Hz.
        _lowerCamelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a__ , norm='''slaney''' , mel_scale='''slaney''' , )
    def _UpperCAmelCase ( self , a__ ):
        """Compute the clamped, rescaled log10 mel spectrogram of one waveform."""
        _lowerCamelCase = spectrogram(
            a__ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        # Drop the last frame, clamp to 8 dB below the max, rescale to ~[-1, 1].
        _lowerCamelCase = log_spec[:, :-1]
        _lowerCamelCase = np.maximum(a__ , log_spec.max() - 8.0 )
        _lowerCamelCase = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def _UpperCAmelCase ( a__ , a__ , a__ = 0.0 ):
        """Normalize each vector to zero mean / unit variance over its valid (unpadded) length."""
        if attention_mask is not None:
            _lowerCamelCase = np.array(a__ , np.intaa )
            _lowerCamelCase = []
            for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
                _lowerCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    # Positions past the valid length are reset to the padding value.
                    _lowerCamelCase = padding_value
                normed_input_values.append(a__ )
        else:
            _lowerCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , a__ , a__ = True , a__ = None , a__ = None , a__ = None , a__ = "max_length" , a__ = None , a__ = None , a__ = None , **a__ , ):
        """Featurize raw speech: validate the sampling rate, batch, pad/truncate, then extract log-mel features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Normalize the input into a batch (list) of 2D float arrays.
        _lowerCamelCase = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        _lowerCamelCase = is_batched_numpy or (
            isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a__ , np.ndarray ):
            _lowerCamelCase = np.asarray(a__ , dtype=np.floataa )
        elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _lowerCamelCase = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _lowerCamelCase = [np.asarray([raw_speech] ).T]
        _lowerCamelCase = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        _lowerCamelCase = self.pad(
            a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            _lowerCamelCase = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            _lowerCamelCase = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        _lowerCamelCase = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        _lowerCamelCase = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
        if isinstance(input_features[0] , a__ ):
            _lowerCamelCase = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
        else:
            _lowerCamelCase = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            _lowerCamelCase = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            _lowerCamelCase = padded_inputs.convert_to_tensors(a__ )
        return padded_inputs
    def _UpperCAmelCase ( self ):
        """Serialize the extractor state for saving, dropping the large mel filter bank."""
        _lowerCamelCase = copy.deepcopy(self.__dict__ )
        _lowerCamelCase = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 297 | 0 |
from __future__ import annotations
RADIX = 10  # sorting base; the function below reads RADIX, but the original bound the constant to a scrambled name


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[int] ):
    """Sort a list of non-negative integers in place with LSD radix sort (base RADIX).

    Returns the same list object, sorted ascending. Fixes from review: the
    original iterated `range(<the input list>)` (a TypeError), incremented an
    uninitialized counter, rebound its own name instead of writing back into
    the list, and crashed on an empty input via `max([])`.
    """
    if not UpperCamelCase__:
        return UpperCamelCase__

    placement = 1
    max_digit = max(UpperCamelCase__)
    while placement <= max_digit:
        # Distribute values into one bucket per digit at the current place value.
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        for value in UpperCamelCase__:
            buckets[(value // placement) % RADIX].append(value)
        # Concatenate buckets back into the list; bucket order keeps the sort stable.
        write_index = 0
        for bucket in buckets:
            for value in bucket:
                UpperCamelCase__[write_index] = value
                write_index += 1
        # move to next digit
        placement *= RADIX
    return UpperCamelCase__
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head on the pooled output.

    Fix from review: the subclass defined right below references
    `FlaxBigBirdForNaturalQuestionsModule`, which was otherwise undefined
    (this class had a scrambled name), and the base class was an undefined
    name — `FlaxBigBirdForQuestionAnsweringModule` is the imported module class.
    NOTE(review): the three dataclass field names are reconstructed from usage
    (`self.dtype` is read in `setup`) — confirm.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # Head predicting one of 5 answer categories from the pooled representation.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        # (start_logits, end_logits, pooled_logits)
        return outputs[:2] + (cls_out,)
class UpperCamelCase_ ( UpperCamelCase__ ):
    # Flax BigBird QA model whose module adds a 5-way pooled classification head.
    # NOTE(review): `FlaxBigBirdForNaturalQuestionsModule` is not defined under
    # that name in this file as written — the module class directly above appears
    # to have had its name scrambled; confirm.
    lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule
def SCREAMING_SNAKE_CASE__(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Mean cross-entropy averaged over the start/end/pooled classification heads.

    Fix from review: the original bound all three losses to one shadowed name,
    leaving `start_loss`, `end_loss`, and `pooled_loss` undefined at the return.
    NOTE(review): the argument order is reconstructed — confirm against the
    callers that pass logits/labels into `state.loss_fn`.
    """

    def cross_entropy(logits, labels, reduction=None):
        # One-hot encode labels against the logits' class axis, then take
        # the negative log-likelihood of the true class.
        num_classes = logits.shape[-1]
        one_hot = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(one_hot * log_probs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase_ :
    # Training hyperparameters for BigBird Natural-Questions fine-tuning.
    # NOTE(review): every field shares one scrambled name, so as written only the
    # last value survives on the dataclass; the originals were distinct fields
    # (model id, step counts, batch size, lr schedule, run/save names, data paths)
    # — confirm against the original script.
    lowerCamelCase_ = "google/bigbird-roberta-base"
    lowerCamelCase_ = 30_00
    lowerCamelCase_ = 1_05_00
    lowerCamelCase_ = 1_28
    lowerCamelCase_ = 3
    lowerCamelCase_ = 1
    lowerCamelCase_ = 5
    # tx_args
    lowerCamelCase_ = 3e-5
    lowerCamelCase_ = 0.0
    lowerCamelCase_ = 2_00_00
    lowerCamelCase_ = 0.0095
    lowerCamelCase_ = "bigbird-roberta-natural-questions"
    lowerCamelCase_ = "training-expt"
    lowerCamelCase_ = "data/nq-training.jsonl"
    lowerCamelCase_ = "data/nq-validation.jsonl"
    def _snake_case ( self :str ) -> Optional[int]:
        """Create the experiment directory and compute the effective per-host batch size.

        NOTE(review): `__A` is undefined as written and the results are bound to a
        scrambled local instead of instance attributes — confirm (this looks like
        a `__post_init__`-style setup method).
        """
        os.makedirs(self.base_dir , exist_ok=__A )
        SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir )
        SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase_ :
    # Collates tokenized Natural-Questions features into sharded jnp batches.
    # NOTE(review): the two fields share one scrambled name (originals were
    # presumably pad_id and max_length) — confirm.
    lowerCamelCase_ = 42
    lowerCamelCase_ = 40_96 # no dynamic padding on TPUs
    def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple:
        """Collate a batch of features, then shard it across local devices."""
        SCREAMING_SNAKE_CASE__ = self.collate_fn(__A )
        SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A )
        return batch
    def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int:
        """Build the model-input dict (ids, mask, label arrays) from raw features."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] )
        SCREAMING_SNAKE_CASE__ = {
            """input_ids""": jnp.array(__A , dtype=jnp.intaa ),
            """attention_mask""": jnp.array(__A , dtype=jnp.intaa ),
            """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
            """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
            """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
        }
        return batch
    def _snake_case ( self :Tuple , __A :list ) -> Dict:
        """Pad every example and return parallel (input_ids, attention_mask) lists."""
        SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids]
        return zip(*__A )
    def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]:
        """Right-pad one id sequence to max_length; mask is 1 for real tokens, 0 for padding."""
        SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )]
        while len(__A ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield `len(dataset) // batch_size` dict batches, optionally shuffling first.

    Fixes from review: the original declared three parameters with one shared
    name (a SyntaxError) and was defined under a name different from the one
    the trainer calls (`get_batched_dataset`). A trailing partial batch is
    dropped, matching the original slicing logic.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    """One pmapped optimization step: compute loss/grads, pmean them, apply update.

    Returns (new_state, metrics, new dropout rng). Fix from review: the original
    declared its parameters with one shared scrambled name (a SyntaxError).
    NOTE(review): the name and argument order are reconstructed from the
    trainer's `self.train_step_fn(state, drp_rng, **batch)` call — confirm.
    """

    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    # Average loss and gradients across devices before applying the update.
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    """Pmapped evaluation step: forward pass only, returning the device-mean loss.

    Fix from review: the original declared its parameters with one shared
    scrambled name (a SyntaxError). NOTE(review): the name and `train=False`
    flag are reconstructed — confirm.
    """
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class UpperCamelCase_ ( train_state.TrainState ):
    # Extends flax's TrainState with a stored loss function, excluded from the
    # parameter pytree via `struct.field(pytree_node=...)`.
    # NOTE(review): the field's `pytree_node` argument is a scrambled undefined
    # name (presumably `False`) — confirm.
    lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ )
@dataclass
class UpperCamelCase_ :
    """Flax trainer harness: builds the (replicated) TrainState, runs the
    epoch loop with periodic evaluation/logging, and saves checkpoints.

    NOTE(review): the machine renaming collapsed the original field names
    (args, data_collator, train_step_fn, val_step_fn, model_save_fn,
    logger, scheduler_fn) to `lowerCamelCase_` and replaced their type
    annotations with the literal 42; the four methods below were also all
    renamed to `_snake_case`, so only the last definition survives, and
    their parameter lists repeat `__A` (a SyntaxError).
    """
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = None

    def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A :Tuple=None ) -> int:
        """Create the TrainState for the model (optionally restoring params,
        optimizer state and step from a checkpoint directory) and replicate
        it across local devices."""
        SCREAMING_SNAKE_CASE__ = model.params
        SCREAMING_SNAKE_CASE__ = TrainState.create(
            apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , )
        if ckpt_dir is not None:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A )
            # Rebuild the optimizer from the restored args so the schedule
            # matches the run being resumed.
            SCREAMING_SNAKE_CASE__ = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A )
            SCREAMING_SNAKE_CASE__ = train_state.TrainState(
                step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , )
            SCREAMING_SNAKE_CASE__ = args
            SCREAMING_SNAKE_CASE__ = data_collator
            SCREAMING_SNAKE_CASE__ = lr
            SCREAMING_SNAKE_CASE__ = params
        # Replicate the state onto every local device for pmap'd training.
        SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A )
        return state

    def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str:
        """Main training loop: one shuffled pass per epoch over the batched
        dataset, logging train/eval loss and the current learning rate every
        `logging_steps` and writing a checkpoint every `save_steps`."""
        SCREAMING_SNAKE_CASE__ = self.args
        SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size
        SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
        SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() )
        for epoch in range(args.max_epochs ):
            SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa )
            SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A )
            SCREAMING_SNAKE_CASE__ = 0
            for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ):
                SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A )
                running_loss += jax_utils.unreplicate(metrics["""loss"""] )
                i += 1
                if i % args.logging_steps == 0:
                    SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step )
                    SCREAMING_SNAKE_CASE__ = running_loss.item() / i
                    SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 )
                    SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A )
                    SCREAMING_SNAKE_CASE__ = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(__A ) )
                    self.logger.log(__A , commit=__A )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A )

    def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]:
        """Average the per-batch loss of `val_step_fn` over the whole batched
        validation dataset and return it."""
        SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size )
        SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size
        SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa )
        SCREAMING_SNAKE_CASE__ = 0
        for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ):
            SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
            SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A )
            running_loss += jax_utils.unreplicate(metrics["""loss"""] )
            i += 1
        return running_loss / i

    def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]:
        """Serialize model params, optimizer state, training args, data
        collator and the current step into the given directory."""
        # Pull the replicated state back off the devices before saving.
        SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A )
        print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
        self.model_save_fn(__A , params=state.params )
        with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) )
        joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) )
        with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f:
            json.dump({"""step""": state.step.item()} , __A )
        print("""DONE""" )
def SCREAMING_SNAKE_CASE__ ( save_dir , state ):
    """Restore a training checkpoint written by the trainer's
    ``save_checkpoint``: model params, optimizer state, step counter,
    training args and the data collator.

    The machine-renamed original declared both parameters as
    ``UpperCamelCase__`` (duplicate parameter names are a SyntaxError)
    while the body read ``save_dir`` and ``state``; the parameter names
    are restored from that usage, as are the distinct result locals.

    Returns:
        tuple: ``(params, opt_state, step, args, data_collator)``.
    """
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ )
    # Deserialize params/opt_state against the templates held by `state`.
    with open(os.path.join(save_dir , """flax_model.msgpack""" ) , """rb""" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """rb""" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , """args.joblib""" ) )
    data_collator = joblib.load(os.path.join(save_dir , """data_collator.joblib""" ) )
    with open(os.path.join(save_dir , """training_state.json""" ) , """r""" ) as f:
        training_state = json.load(f )
    step = training_state["""step"""]
    print("""DONE""" )
    return params, opt_state, step, args, data_collator
def SCREAMING_SNAKE_CASE__ ( lr , init_lr , warmup_steps , num_train_steps ):
    """Build a two-phase optax learning-rate schedule: linear warmup from
    ``init_lr`` up to ``lr`` over ``warmup_steps``, then linear decay from
    ``lr`` down to 1e-7 over the remaining training steps.

    The machine-renamed original declared all four parameters as
    ``UpperCamelCase__`` (duplicate parameter names are a SyntaxError) and
    read undefined globals; the parameter names are restored from the
    body's usage (``num_train_steps - warmup_steps`` and the schedule
    endpoints).

    Returns:
        An optax schedule function mapping step -> learning rate.
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    # Switch from warmup to decay exactly at `warmup_steps`.
    lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr_schedule
def SCREAMING_SNAKE_CASE__ ( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    """Build the AdamW optimizer and its learning-rate schedule.

    Weight decay is masked out for bias and LayerNorm scale parameters.
    The machine-renamed original declared all five parameters as
    ``UpperCamelCase__`` (duplicate names are a SyntaxError); names are
    restored from the body's usage. The original mask comprehension also
    tested the parameter *values* ``v`` against the key strings — the mask
    must be computed from the flattened tuple keys instead.

    Returns:
        tuple: ``(tx, lr_schedule)`` — the optax transformation and the
        schedule passed to it.
    """
    def weight_decay_mask(params):
        # Flatten to tuple-of-strings keys; decay everything except biases
        # and LayerNorm scales.
        flat_params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != """bias""" and k[-2:] != ("""LayerNorm""", """scale""")) for k in flat_params}
        return traverse_util.unflatten_dict(mask)

    # NOTE(review): `scheduler_fn` is the schedule builder defined above in
    # this file (its name was mangled by the renaming pass) — confirm the
    # binding before running.
    lr_schedule = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr_schedule , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr_schedule
'''simple docstring'''
import pprint
import requests
a : int = '''https://zenquotes.io/api'''
def __UpperCAmelCase ( ) -> list:
    """Fetch the ZenQuotes 'quote of the day' and return the decoded JSON
    payload."""
    response = requests.get(API_ENDPOINT_URL + "/today" )
    return response.json()
def __UpperCAmelCase ( ) -> list:
    """Fetch a random quote from the ZenQuotes API and return the decoded
    JSON payload."""
    response = requests.get(API_ENDPOINT_URL + "/random" )
    return response.json()
if __name__ == "__main__":
    # Demo entry point: fetch and pretty-print a random quote.
    # NOTE(review): `random_quotes` and `response` are not defined in this
    # file — the renaming pass collapsed both fetchers to `__UpperCAmelCase`
    # and the result variable to `a`; as written this block raises NameError.
    a : str = random_quotes()
    pprint.pprint(response)
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
    """Verify the joblib-spark parallel backend: entering the context sets
    the backend name, and an unsupported backend name raises from within
    `map_nested` for both a fixed worker count and `num_proc=-1`.

    NOTE(review): `pytest.raises(_UpperCAmelCase )` and the `map_nested`
    arguments reference `_UpperCAmelCase`, which is unbound at module scope
    — in the upstream test these were `ValueError`, the add-one helper and
    the list built just above; confirm against datasets' test suite.
    """
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    __snake_case = [1, 2, 3]
    with pytest.raises(_UpperCAmelCase ):
        with parallel_backend("unsupported backend" ):
            map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
    with pytest.raises(_UpperCAmelCase ):
        with parallel_backend("unsupported backend" ):
            map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
    """Check `map_nested` under the spark backend on a list, flat dicts,
    a dict of lists and a nested dict, for both parametrized num_proc
    values.

    NOTE(review): the renaming pass collapsed every fixture and expected
    value to the single name `__snake_case`, so only the last assignment
    survives and the asserts below all compare against the same objects —
    consult the upstream test for the intended pairings.
    """
    __snake_case = [1, 2]
    __snake_case = {"a": 1, "b": 2}
    __snake_case = {"a": [1, 2], "b": [3, 4]}
    __snake_case = {"a": {"1": 1}, "b": 2}
    __snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
    __snake_case = [2, 3]
    __snake_case = {"a": 2, "b": 3}
    __snake_case = {"a": [2, 3], "b": [4, 5]}
    __snake_case = {"a": {"1": 2}, "b": 3}
    __snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
        assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
        assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
        assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
        assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class __A ( A ):
    """Configuration for the OwlViT text encoder: vocab/hidden sizes,
    attention geometry, activation, dropout and initializer settings,
    plus the special token ids forwarded to the base config.

    NOTE(review): the base class `A` is not defined at module scope here
    (originally PretrainedConfig) and every `__init__` parameter is named
    `A` (duplicate parameter names are a SyntaxError) — the assignment
    targets read in the body show the intended parameter names.
    """
    __lowerCamelCase : str = 'owlvit_text_model'

    def __init__(self , A=49_408 , A=512 , A=2_048 , A=12 , A=8 , A=16 , A="quick_gelu" , A=1E-5 , A=0.0 , A=0.02 , A=1.0 , A=0 , A=49_406 , A=49_407 , **A , ) -> Union[str, Any]:
        """Store the text-model hyperparameters and forward the pad/bos/eos
        token ids to the base config."""
        super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
        _a = vocab_size
        _a = hidden_size
        _a = intermediate_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = max_position_embeddings
        _a = hidden_act
        _a = layer_norm_eps
        _a = attention_dropout
        _a = initializer_range
        _a = initializer_factor

    @classmethod
    def a__ (cls , A , **A ) -> "PretrainedConfig":
        """Load this text config from a checkpoint name/path, unwrapping the
        nested `text_config` when given a full OwlViT config, and warn on a
        model_type mismatch."""
        cls._set_token_in_kwargs(A )
        _a , _a = cls.get_config_dict(A , **A )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            _a = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(A , **A )
class __A ( A ):
    """Configuration for the OwlViT vision encoder: ViT geometry (layers,
    heads, channels, image/patch size), activation, dropout and
    initializer settings.

    NOTE(review): the base class `A` is not defined at module scope here
    (originally PretrainedConfig) and every `__init__` parameter is named
    `A` (duplicate parameter names are a SyntaxError) — the assignment
    targets read in the body show the intended parameter names.
    """
    __lowerCamelCase : Dict = 'owlvit_vision_model'

    def __init__(self , A=768 , A=3_072 , A=12 , A=12 , A=3 , A=768 , A=32 , A="quick_gelu" , A=1E-5 , A=0.0 , A=0.02 , A=1.0 , **A , ) -> Union[str, Any]:
        """Store the vision-model hyperparameters."""
        super().__init__(**A )
        _a = hidden_size
        _a = intermediate_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = num_channels
        _a = image_size
        _a = patch_size
        _a = hidden_act
        _a = layer_norm_eps
        _a = attention_dropout
        _a = initializer_range
        _a = initializer_factor

    @classmethod
    def a__ (cls , A , **A ) -> "PretrainedConfig":
        """Load this vision config from a checkpoint name/path, unwrapping
        the nested `vision_config` when given a full OwlViT config, and warn
        on a model_type mismatch."""
        cls._set_token_in_kwargs(A )
        _a , _a = cls.get_config_dict(A , **A )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            _a = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(A , **A )
class __A ( A ):
    """Composite OwlViT configuration: nests a text config and a vision
    config and stores the shared projection dim, logit-scale init and
    return_dict flag.

    NOTE(review): the base class `A` is not defined at module scope here
    (originally PretrainedConfig), the referenced `OwlViTTextConfig` /
    `OwlViTVisionConfig` classes were renamed to `__A` above, and method
    parameters repeat the name `A` (a SyntaxError).
    """
    __lowerCamelCase : Optional[int] = 'owlvit'
    __lowerCamelCase : List[str] = True

    def __init__(self , A=None , A=None , A=512 , A=2.6592 , A=True , **A , ) -> Optional[int]:
        """Build the nested text/vision configs (falling back to defaults
        when a dict is None) and store the projection settings."""
        super().__init__(**A )
        if text_config is None:
            _a = {}
            logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
        if vision_config is None:
            _a = {}
            logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
        _a = OwlViTTextConfig(**A )
        _a = OwlViTVisionConfig(**A )
        _a = projection_dim
        _a = logit_scale_init_value
        _a = return_dict
        _a = 1.0

    @classmethod
    def a__ (cls , A , **A ) -> "PretrainedConfig":
        """Load a full OwlViT config from a checkpoint name/path, warning on
        a model_type mismatch."""
        cls._set_token_in_kwargs(A )
        _a , _a = cls.get_config_dict(A , **A )
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(A , **A )

    @classmethod
    def a__ (cls , A , A , **A ) -> Any:
        """Assemble a composite config from separate text and vision config
        dicts."""
        _a = {}
        _a = text_config
        _a = vision_config
        return cls.from_dict(A , **A )

    def a__ (self ) -> Tuple:
        """Serialize to a plain dict, expanding the nested configs and
        recording the class's model_type."""
        _a = copy.deepcopy(self.__dict__ )
        _a = self.text_config.to_dict()
        _a = self.vision_config.to_dict()
        _a = self.__class__.model_type
        return output
class __A ( A ):
    """ONNX export configuration for OwlViT: declares the dynamic axes of
    inputs/outputs, the validation tolerance, dummy-input generation and
    the minimum supported opset.

    NOTE(review): the base class `A` is not defined at module scope here
    (originally OnnxConfig), and all four properties were renamed to the
    single name `a__` — only the last definition survives.
    """
    @property
    def a__ (self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the model inputs."""
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    @property
    def a__ (self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the model outputs."""
        return OrderedDict(
            [
                ('''logits_per_image''', {0: '''batch'''}),
                ('''logits_per_text''', {0: '''batch'''}),
                ('''text_embeds''', {0: '''batch'''}),
                ('''image_embeds''', {0: '''batch'''}),
            ] )

    @property
    def a__ (self ) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    def a__ (self , A , A = -1 , A = -1 , A = None , ) -> Mapping[str, Any]:
        """Generate dummy text and image inputs via the processor's
        tokenizer/image processor and merge them into one feed dict.

        NOTE(review): parameters are all named `A` (a SyntaxError) and the
        body reads `processor`, which is unbound here — originally the
        first parameter.
        """
        _a = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=A , seq_length=A , framework=A )
        _a = super().generate_dummy_inputs(
            processor.image_processor , batch_size=A , framework=A )
        return {**text_input_dict, **image_input_dict}

    @property
    def a__ (self ) -> int:
        """Minimum ONNX opset this export supports."""
        return 14
| 11 |
import tensorflow as tf
from ...tf_utils import shape_list
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """Adaptive softmax output layer (TransfoXL style): a short-list head
    plus clustered tail vocabularies, with optional per-segment output
    projections. `call` returns log-probabilities over the vocabulary and,
    when targets are given, registers the negative log-likelihood as a
    layer loss/metric.

    NOTE(review): every method parameter below is named `snake_case__`
    (duplicate parameter names are a SyntaxError), all locals were
    collapsed to `lowercase_`, and the bodies read the original names
    (vocab_size, x, proj, target, ...) — this is a machine-renamed copy;
    consult the upstream TFAdaptiveSoftmaxMask for the real signatures.
    """
    def __init__( self, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=1, snake_case__=False, **snake_case__ ) -> Optional[int]:
        """Record sizes, cutoffs, div_val and keep_order; all weights are
        created lazily in `build`."""
        super().__init__(**snake_case__ )
        lowercase_ : Tuple = vocab_size
        lowercase_ : Union[str, Any] = d_embed
        lowercase_ : Optional[Any] = d_proj
        lowercase_ : Optional[Any] = cutoffs + [vocab_size]
        lowercase_ : Optional[int] = [0] + self.cutoffs
        lowercase_ : Union[str, Any] = div_val
        lowercase_ : Union[str, Any] = self.cutoffs[0]
        lowercase_ : List[str] = len(self.cutoffs ) - 1
        lowercase_ : List[str] = self.shortlist_size + self.n_clusters
        lowercase_ : Tuple = keep_order
        lowercase_ : Dict = []
        lowercase_ : int = []

    def snake_case__ ( self, snake_case__ ) -> Optional[int]:
        """Create the cluster head weights plus, per cutoff segment, the
        output projection and (weight, bias) pair — sharing one embedding
        matrix when div_val == 1, shrinking d_embed by div_val**i
        otherwise."""
        if self.n_clusters > 0:
            lowercase_ : List[str] = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="""zeros""", trainable=snake_case__, name="""cluster_weight""" )
            lowercase_ : List[Any] = self.add_weight(
                shape=(self.n_clusters,), initializer="""zeros""", trainable=snake_case__, name="""cluster_bias""" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    lowercase_ : Any = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""", )
                    self.out_projs.append(snake_case__ )
                else:
                    self.out_projs.append(snake_case__ )
                lowercase_ : List[str] = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", )
                lowercase_ : List[str] = self.add_weight(
                    shape=(self.vocab_size,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                lowercase_ , lowercase_ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                lowercase_ : Union[str, Any] = self.d_embed // (self.div_val**i)
                lowercase_ : int = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""" )
                self.out_projs.append(snake_case__ )
                lowercase_ : Any = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", )
                lowercase_ : Tuple = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", )
                self.out_layers.append((weight, bias) )
        super().build(snake_case__ )

    @staticmethod
    def snake_case__ ( snake_case__, snake_case__, snake_case__, snake_case__=None ) -> Optional[Any]:
        """Compute logits `x @ W^T + b`, first projecting the hidden states
        through `proj` when one is supplied."""
        lowercase_ : Dict = x
        if proj is not None:
            lowercase_ : List[Any] = tf.einsum("""ibd,ed->ibe""", snake_case__, snake_case__ )
        return tf.einsum("""ibd,nd->ibn""", snake_case__, snake_case__ ) + b

    @staticmethod
    def snake_case__ ( snake_case__, snake_case__ ) -> List[str]:
        """Gather, for every position, the log-probability assigned to its
        target id."""
        lowercase_ : Optional[int] = shape_list(snake_case__ )
        lowercase_ : Optional[Any] = tf.range(lp_size[0], dtype=target.dtype )
        lowercase_ : str = tf.stack([r, target], 1 )
        return tf.gather_nd(snake_case__, snake_case__ )

    def snake_case__ ( self, snake_case__, snake_case__, snake_case__=True, snake_case__=False ) -> Dict:
        """Forward pass. With no clusters this is a plain softmax; otherwise
        the head distribution covers the shortlist plus one slot per tail
        cluster, and each tail's log-probs are offset by the head's cluster
        log-prob. When targets are given the NLL is scattered back into
        `loss` and registered via add_loss/add_metric."""
        lowercase_ : Any = 0
        if self.n_clusters == 0:
            lowercase_ : Union[str, Any] = self._logit(snake_case__, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0] )
            if target is not None:
                lowercase_ : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case__, logits=snake_case__ )
            lowercase_ : Optional[int] = tf.nn.log_softmax(snake_case__, axis=-1 )
        else:
            lowercase_ : int = shape_list(snake_case__ )
            lowercase_ : List[Any] = []
            lowercase_ : Tuple = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                lowercase_ , lowercase_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    lowercase_ : Tuple = (target >= l_idx) & (target < r_idx)
                    lowercase_ : Union[str, Any] = tf.where(snake_case__ )
                    lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ ) - l_idx
                if self.div_val == 1:
                    lowercase_ : Optional[Any] = self.out_layers[0][0][l_idx:r_idx]
                    lowercase_ : Union[str, Any] = self.out_layers[0][1][l_idx:r_idx]
                else:
                    lowercase_ : Dict = self.out_layers[i][0]
                    lowercase_ : int = self.out_layers[i][1]
                if i == 0:
                    # Head segment: append the cluster logits to the shortlist.
                    lowercase_ : Optional[int] = tf.concat([cur_W, self.cluster_weight], 0 )
                    lowercase_ : Optional[Any] = tf.concat([cur_b, self.cluster_bias], 0 )
                    lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[0] )
                    lowercase_ : List[str] = tf.nn.log_softmax(snake_case__ )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        lowercase_ : int = tf.boolean_mask(snake_case__, snake_case__ )
                        lowercase_ : Optional[int] = self._gather_logprob(snake_case__, snake_case__ )
                else:
                    lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[i] )
                    lowercase_ : Dict = tf.nn.log_softmax(snake_case__ )
                    lowercase_ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    lowercase_ : int = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(snake_case__ )
                    if target is not None:
                        lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ )
                        lowercase_ : Optional[int] = tf.boolean_mask(snake_case__, snake_case__ )
                        lowercase_ : Union[str, Any] = self._gather_logprob(snake_case__, snake_case__ )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(snake_case__, -cur_logprob, shape_list(snake_case__ ) )
            lowercase_ : List[str] = tf.concat(snake_case__, axis=-1 )
        if target is not None:
            if return_mean:
                lowercase_ : Tuple = tf.reduce_mean(snake_case__ )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(snake_case__ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(snake_case__, name=self.name, aggregation="""mean""" if return_mean else """""" )
        return out
'''simple docstring'''
import math
class SCREAMING_SNAKE_CASE:
    """A minimal two-cluster self-organizing map (Kohonen network).

    The map holds two weight vectors; training pulls the winning vector
    towards each sample by a learning rate ``alpha``.

    The machine-renamed original declared duplicate parameter names (a
    SyntaxError), gave both methods the same mangled name (so the first
    was shadowed), and accumulated both cluster distances into a single
    variable. The method names are restored to ``get_winner``/``update``
    to match the driver function below.
    """

    def get_winner(self, weights, sample):
        """Return the index (0 or 1) of the weight vector closest to
        ``sample`` by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The smaller squared distance wins (ties go to cluster 0).
        return 0 if d0 <= d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move winning vector ``j`` towards ``sample`` by factor ``alpha``
        and return the (mutated) weight matrix."""
        # Iterate over the dimensions of the winning vector, not over the
        # number of clusters.
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def a():
    """Train a two-unit self-organizing map on four 4-bit samples for three
    epochs, then classify a held-out sample and print the winning cluster
    and trained weights.

    NOTE(review): `SelfOrganizingMap` is not defined in this file — the
    class above was renamed by an automated pass — so this function raises
    NameError as written; confirm the intended class/method names.
    """
    snake_case_ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    snake_case_ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    snake_case_ = SelfOrganizingMap()
    snake_case_ = 3
    snake_case_ = 0.5
    for _ in range(lowercase__ ):
        for j in range(len(lowercase__ ) ):
            # training sample
            snake_case_ = training_samples[j]
            # Compute the winning vector
            snake_case_ = self_organizing_map.get_winner(lowercase__ , lowercase__ )
            # Update the winning vector
            snake_case_ = self_organizing_map.update(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    # classify test sample
    snake_case_ = [0, 0, 0, 1]
    snake_case_ = self_organizing_map.get_winner(lowercase__ , lowercase__ )
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""" )
    print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the training entry
    # point above was renamed to `a` by an automated pass, so this raises
    # NameError when executed directly.
    main()
| 706 |
def a(number, iterations):
    """Play FizzBuzz from ``number`` to ``iterations`` inclusive.

    Returns one space-separated string where multiples of 3 become "Fizz",
    multiples of 5 become "Buzz", multiples of both become "FizzBuzz" and
    every other value is printed as-is (a trailing space follows each
    entry, matching the original output format).

    The machine-renamed original declared both parameters as ``lowercase__``
    (duplicate parameter names are a SyntaxError) and validated with
    ``isinstance(x, x)``; the parameter names and the ``int`` type checks
    are restored here.

    Raises:
        ValueError: if either argument is not a positive integer.
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 46 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __snake_case :
    """Builds miniature GPT-J configs and random inputs for the Flax tests
    below, and provides cache-consistency checks for incremental decoding.

    NOTE(review): this is a machine-renamed copy of FlaxGPTJModelTester —
    `__init__` declares every parameter as `A_` (duplicate parameter names
    are a SyntaxError) while the body reads the original names, and
    several call sites pass the unbound name `A_`.
    """
    def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=4 , A_=4 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=0.02 , ):
        """Record the miniature model/test hyperparameters; pad/bos/eos ids
        default to vocab_size - 1."""
        SCREAMING_SNAKE_CASE__ = parent
        SCREAMING_SNAKE_CASE__ = batch_size
        SCREAMING_SNAKE_CASE__ = seq_length
        SCREAMING_SNAKE_CASE__ = is_training
        SCREAMING_SNAKE_CASE__ = use_input_mask
        SCREAMING_SNAKE_CASE__ = use_token_type_ids
        SCREAMING_SNAKE_CASE__ = use_labels
        SCREAMING_SNAKE_CASE__ = vocab_size
        SCREAMING_SNAKE_CASE__ = hidden_size
        SCREAMING_SNAKE_CASE__ = rotary_dim
        SCREAMING_SNAKE_CASE__ = num_hidden_layers
        SCREAMING_SNAKE_CASE__ = num_attention_heads
        SCREAMING_SNAKE_CASE__ = intermediate_size
        SCREAMING_SNAKE_CASE__ = hidden_act
        SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ = max_position_embeddings
        SCREAMING_SNAKE_CASE__ = initializer_range
        SCREAMING_SNAKE_CASE__ = None
        SCREAMING_SNAKE_CASE__ = vocab_size - 1
        SCREAMING_SNAKE_CASE__ = vocab_size - 1
        SCREAMING_SNAKE_CASE__ = vocab_size - 1

    def lowercase_ ( self ):
        """Build a random (config, input_ids, attention_mask) triple sized
        by the tester's hyperparameters."""
        SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__ = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)

    def lowercase_ ( self ):
        """Return (config, inputs_dict) in the shape the common-test mixin
        expects."""
        SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs
        SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def lowercase_ ( self , A_ , A_ , A_ , A_ ):
        """Check that decoding the final token against a pre-filled cache
        produces (to 1e-3) the same logits as one full forward pass."""
        SCREAMING_SNAKE_CASE__ = 20
        SCREAMING_SNAKE_CASE__ = model_class_name(A_ )
        SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , A_ )
        SCREAMING_SNAKE_CASE__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        SCREAMING_SNAKE_CASE__ = model(
            input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
        SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        SCREAMING_SNAKE_CASE__ = model(
            input_ids[:, -1:] , attention_mask=A_ , past_key_values=outputs_cache.past_key_values , position_ids=A_ , )
        SCREAMING_SNAKE_CASE__ = model(A_ )
        SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )

    def lowercase_ ( self , A_ , A_ , A_ , A_ ):
        """Same cache-consistency check, but with an explicit attention mask
        zero-padded out to the maximum decoder length."""
        SCREAMING_SNAKE_CASE__ = 20
        SCREAMING_SNAKE_CASE__ = model_class_name(A_ )
        SCREAMING_SNAKE_CASE__ = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        SCREAMING_SNAKE_CASE__ = model.init_cache(input_ids.shape[0] , A_ )
        SCREAMING_SNAKE_CASE__ = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        SCREAMING_SNAKE_CASE__ = model(
            input_ids[:, :-1] , attention_mask=A_ , past_key_values=A_ , position_ids=A_ , )
        SCREAMING_SNAKE_CASE__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        SCREAMING_SNAKE_CASE__ = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=A_ , position_ids=A_ , )
        SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ )
        SCREAMING_SNAKE_CASE__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Flax GPT-J model tests: cached incremental decoding, batched
    generation against pinned expected strings, and PyTorch<->Flax weight
    conversion / output equivalence in both directions.

    NOTE(review): both base classes are the undefined name
    `__SCREAMING_SNAKE_CASE` (originally FlaxModelTesterMixin and
    FlaxGenerationTesterMixin), `FlaxGPTJModelTester` is not defined under
    that name in this file, several methods share the name `lowercase_`
    (later definitions shadow earlier ones), and many call sites pass the
    unbound name `A_` — all artifacts of the automated renaming pass.
    """
    lowerCamelCase__ : Optional[int] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    lowerCamelCase__ : Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def lowercase_ ( self ):
        """Create the shared model tester."""
        SCREAMING_SNAKE_CASE__ = FlaxGPTJModelTester(self )

    def lowercase_ ( self ):
        """Run the cached-forward consistency check for every model class."""
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(A_ , A_ , A_ , A_ )

    def lowercase_ ( self ):
        """Run the cached-forward-with-attention-mask check for every model
        class."""
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                A_ , A_ , A_ , A_ )

    @tooslow
    def lowercase_ ( self ):
        """Generate greedily from the full EleutherAI/gpt-j-6B checkpoint on
        a left-padded batch and compare against pinned completions."""
        SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
        SCREAMING_SNAKE_CASE__ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=A_ , truncation=A_ )
        SCREAMING_SNAKE_CASE__ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
        SCREAMING_SNAKE_CASE__ = False
        SCREAMING_SNAKE_CASE__ = model.config.eos_token_id
        SCREAMING_SNAKE_CASE__ = jax.jit(model.generate )
        SCREAMING_SNAKE_CASE__ = jit_generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
        SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
        SCREAMING_SNAKE_CASE__ = [
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(A_ , A_ )

    @is_pt_flax_cross_test
    def lowercase_ ( self ):
        """Convert PyTorch weights to Flax, compare last-token logits, then
        round-trip through save_pretrained(from_pt=...) and re-compare."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                SCREAMING_SNAKE_CASE__ = self._prepare_for_class(A_ , A_ )
                SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
                SCREAMING_SNAKE_CASE__ = getattr(A_ , A_ )
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs['''input_ids'''].shape
                SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(A_ ):
                    SCREAMING_SNAKE_CASE__ = 0
                    SCREAMING_SNAKE_CASE__ = 1
                    SCREAMING_SNAKE_CASE__ = 0
                    SCREAMING_SNAKE_CASE__ = 1
                SCREAMING_SNAKE_CASE__ = pt_model_class(A_ ).eval()
                SCREAMING_SNAKE_CASE__ = model_class(A_ , dtype=jnp.floataa )
                SCREAMING_SNAKE_CASE__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , A_ )
                SCREAMING_SNAKE_CASE__ = fx_state
                with torch.no_grad():
                    SCREAMING_SNAKE_CASE__ = pt_model(**A_ ).to_tuple()
                SCREAMING_SNAKE_CASE__ = fx_model(**A_ ).to_tuple()
                self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(A_ )
                    SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(A_ , from_pt=A_ )
                SCREAMING_SNAKE_CASE__ = fx_model_loaded(**A_ ).to_tuple()
                self.assertEqual(
                    len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output_loaded, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )

    @is_pt_flax_cross_test
    def lowercase_ ( self ):
        """Load Flax weights into PyTorch, compare last-token logits, then
        round-trip through save_pretrained(from_flax=...) and re-compare."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                SCREAMING_SNAKE_CASE__ = self._prepare_for_class(A_ , A_ )
                SCREAMING_SNAKE_CASE__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
                SCREAMING_SNAKE_CASE__ = getattr(A_ , A_ )
                SCREAMING_SNAKE_CASE__ = pt_model_class(A_ ).eval()
                SCREAMING_SNAKE_CASE__ = model_class(A_ , dtype=jnp.floataa )
                SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(A_ , fx_model.params )
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pt_inputs['''input_ids'''].shape
                SCREAMING_SNAKE_CASE__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(A_ ):
                    SCREAMING_SNAKE_CASE__ = 0
                    SCREAMING_SNAKE_CASE__ = 1
                    SCREAMING_SNAKE_CASE__ = 0
                    SCREAMING_SNAKE_CASE__ = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    SCREAMING_SNAKE_CASE__ = pt_model(**A_ ).to_tuple()
                SCREAMING_SNAKE_CASE__ = fx_model(**A_ ).to_tuple()
                self.assertEqual(len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(A_ )
                    SCREAMING_SNAKE_CASE__ = pt_model_class.from_pretrained(A_ , from_flax=A_ )
                with torch.no_grad():
                    SCREAMING_SNAKE_CASE__ = pt_model_loaded(**A_ ).to_tuple()
                self.assertEqual(
                    len(A_ ) , len(A_ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(A_ , A_ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )

    @tooslow
    def lowercase_ ( self ):
        """Smoke-test loading the full pretrained checkpoint and running a
        single-token forward pass."""
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE__ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
            SCREAMING_SNAKE_CASE__ = model(np.ones((1, 1) ) )
            self.assertIsNotNone(A_ )
| 100 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of *vector*.

    The original definition's parameter was mangled to `lowerCAmelCase_` while
    the body read `vector`, raising NameError on every call; the sibling
    function below also calls this one as `sigmoid`.
    """
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise sigmoid-weighted linear unit (GELU approximation)
    x * sigmoid(1.702 * x) of *vector*.

    The sigmoid is inlined so this function does not depend on the sibling
    definition (whose name was mangled in the original file).
    """
    return vector * (1.0 / (1.0 + np.exp(-1.702 * vector)))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 220 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build a UperNetConfig (ConvNeXt backbone, ADE20k labels) for *model_name*.

    *model_name* is one of ``upernet-convnext-{tiny,small,base,large,xlarge}``.
    Note the size checks are deliberately plain ``if``s, not ``elif``: "large"
    is a substring of "xlarge", so the xlarge branch re-overrides afterwards.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k semantic segmentation: 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Return a list of (old_key, new_key) pairs mapping mmsegmentation
    checkpoint names to Transformers UperNet+ConvNeXt parameter names.

    Only reads ``config.backbone_config.depths`` to know how many stages/blocks
    the backbone has.
    """
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        # stage 0 has no downsampling layer (the stem plays that role)
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key *old* in *dct* to key *new* (in place)."""
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet+ConvNeXt checkpoint, convert its
    state dict to the Transformers layout, verify logits on a fixture image,
    then optionally save locally and/or push to the Hub.

    NOTE(review): original parameter names were mangled (all three were
    `snake_case__`, a SyntaxError); restored from the argparse call site below.
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # (The original assigned the parser to a mangled name `lowercase` while the
    # add_argument calls used `parser`, so the script crashed immediately.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 343 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* as bytes and return its contents as a string of bits
    (each byte rendered as 8 binary digits).

    Exits the process with a message if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress a Lempel–Ziv bit string back into the original bit string.

    The lexicon starts as {"0": "0", "1": "1"} and grows one entry per emitted
    code; whenever its size crosses a power of two every key gains a leading
    "0" so code words stay the right length.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Code length grew: re-key the whole lexicon with a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string *to_write* to *file_path* as raw bytes.

    The string is chunked into 8-bit groups; a "1" marker followed by zero
    padding is appended so the final partial byte can be recognized, and the
    padded tail byte itself is not written (mirrors the compressor's framing).
    Exits the process with a message if the file cannot be opened.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the length-prefix framing from *data_bits*: skip the run of
    leading "0"s, then drop an equal-sized unary header starting at the first
    "1", returning the remaining payload bits.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Decompress the LZW file at *source_path* into *destination_path*.

    Pipeline: read bits -> strip length prefix -> LZW-decode -> write bytes.
    (The function name `compress` is kept from the original script even though
    it performs decompression, so the __main__ call site keeps working.)
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # CLI: python script.py <compressed_input_file> <decompressed_output_file>
    compress(sys.argv[1], sys.argv[2])
| 343 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    """Builds tiny DeBERTa-v2 configs/inputs and checks each TF head's output
    shapes. Restored to the name `TFDebertaVaModelTester`, which the test class
    below already instantiates; the mangled original (`__lowerCAmelCase`, with
    every method named `_a`) was unusable because methods shadowed each other.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a small random batch plus a matching DebertaVaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        # Also exercise the list-input calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs_dict)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for DeBERTa-v2.

    NOTE(review): the original bases were mangled to the unbound name
    `_lowerCAmelCase`; restored to the two mixins imported at the top of the
    file, and the mangled `A__`/`_a` attribute and method names to the names
    the mixins/unittest require (`all_model_classes`, `setUp`, `test_*`).
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    """Integration tests running a real pretrained DeBERTa-v2 checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        # Reference slice recorded from the original checkpoint.
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 9 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the latest *num_runs* scheduled runs of the daily CI workflow on
    `main`, via the GitHub Actions API. *token* (optional) is a GitHub token
    used for authenticated requests.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run,
    or None if none of the fetched runs has completed."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into
    *output_dir* (as zip files). Artifacts not present in the run are skipped.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword the imported helper declares.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts of the last daily CI run and return their
    contents as ``{artifact_name: {member_filename: text}}``."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 554 | 0 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power for a given apparent power and power
    factor.

    Raises ValueError if *power_factor* is not a number in [-1, 1].
    (Original def had duplicate mangled parameters — a SyntaxError — and
    shared its name with the reactive-power function below, shadowing it.)
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power `S * sqrt(1 - pf**2)` for a given apparent
    power and power factor.

    Raises ValueError if *power_factor* is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 707 |
def climb_stairs(number_of_steps: int) -> int:
    """Return the number of distinct ways to climb *number_of_steps* stairs
    taking 1 or 2 steps at a time (Fibonacci recurrence, iterative, O(n)).

    The `assert` input check is kept from the original (note it disappears
    under `python -O`; callers relying on AssertionError should be aware).
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 170 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_SCREAMING_SNAKE_CASE : Dict = False
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = """ybelkada/fonts"""
def _check_torch_version():
    """Raise ImportError if torch is installed but older than 1.11, which the
    unfold-based patch extraction requires. Restored to the name the call
    sites below already use (`_check_torch_version`)."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height x patch_width) patches from a
    CHW image tensor and return them with shape
    [1, rows, columns, patch_height * patch_width * channels].

    Restored to the name the processor method below already calls
    (`torch_extract_patches`); the original def had three identically-mangled
    parameters (a SyntaxError).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render *text* onto a new PIL image, wrapped at 80 characters, using the
    given font (bytes or path; falls back to downloading Arial from the Hub).
    Returns the rendered PIL Image. Parameter names/defaults reconstructed
    from the mangled original (all params were `__lowerCamelCase`).
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")  # NOTE(review): the mangled module constant at top of file likely held this repo id — confirm
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render *header* text above *image*: both are resized to a common width
    and stacked vertically. Accepts a numpy image, returns a numpy image in
    the same channel-dimension format as the input.
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class UpperCamelCase__ ( __lowerCamelCase ):
a__ : Tuple = ['flattened_patches']
def __init__( self : List[Any], __lowerCamelCase : bool = True, __lowerCamelCase : bool = True, __lowerCamelCase : Dict[str, int] = None, __lowerCamelCase : int = 20_48, __lowerCamelCase : bool = False, **__lowerCamelCase : Union[str, Any], ) -> None:
super().__init__(**__lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
UpperCamelCase__ : Any = do_normalize
UpperCamelCase__ : List[Any] = do_convert_rgb
UpperCamelCase__ : str = max_patches
UpperCamelCase__ : str = is_vqa
def __lowercase( self : Optional[int], __lowerCamelCase : np.ndarray, __lowerCamelCase : int, __lowerCamelCase : dict, **__lowerCamelCase : List[Any] ) -> np.ndarray:
requires_backends(self.extract_flattened_patches, '''torch''' )
_check_torch_version()
# convert to torch
UpperCamelCase__ : List[Any] = to_channel_dimension_format(__lowerCamelCase, ChannelDimension.FIRST )
UpperCamelCase__ : Any = torch.from_numpy(__lowerCamelCase )
UpperCamelCase__ ,UpperCamelCase__ : str = patch_size['''height'''], patch_size['''width''']
UpperCamelCase__ ,UpperCamelCase__ : Any = get_image_size(__lowerCamelCase )
# maximize scale s.t.
UpperCamelCase__ : Any = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase__ : Union[str, Any] = max(min(math.floor(scale * image_height / patch_height ), __lowerCamelCase ), 1 )
UpperCamelCase__ : int = max(min(math.floor(scale * image_width / patch_width ), __lowerCamelCase ), 1 )
UpperCamelCase__ : int = max(num_feasible_rows * patch_height, 1 )
UpperCamelCase__ : str = max(num_feasible_cols * patch_width, 1 )
UpperCamelCase__ : Dict = torch.nn.functional.interpolate(
image.unsqueeze(0 ), size=(resized_height, resized_width), mode='''bilinear''', align_corners=__lowerCamelCase, antialias=__lowerCamelCase, ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase__ : Dict = torch_extract_patches(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
UpperCamelCase__ : Tuple = patches.shape
UpperCamelCase__ : Dict = patches_shape[1]
UpperCamelCase__ : Dict = patches_shape[2]
UpperCamelCase__ : Optional[int] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase__ : Dict = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase__ : str = torch.arange(__lowerCamelCase ).reshape([rows, 1] ).repeat(1, __lowerCamelCase ).reshape([rows * columns, 1] )
UpperCamelCase__ : Optional[int] = torch.arange(__lowerCamelCase ).reshape([1, columns] ).repeat(__lowerCamelCase, 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase__ : Any = row_ids.to(torch.floataa )
UpperCamelCase__ : int = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase__ : Optional[Any] = torch.cat([row_ids, col_ids, patches], -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase__ : Optional[int] = torch.nn.functional.pad(__lowerCamelCase, [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase__ : Optional[Any] = to_numpy_array(__lowerCamelCase )
return result
def __lowercase( self : List[str], image : np.ndarray, data_format : Optional[Union[str, ChannelDimension]] = None, **kwargs ) -> np.ndarray:
    """Normalize `image` to zero mean and unit std (whole-image statistics).

    Fixes from review: the obfuscated signature repeated one parameter name
    (a SyntaxError) while the body read `image`; `np.uinta`/`np.floataa` are
    not NumPy attributes and are restored to `np.uint8`/`np.float32`.

    The std is clamped below by 1/sqrt(image.size) so a constant image does
    not divide by zero. `data_format` is accepted for API compatibility;
    remaining keyword arguments are forwarded to `normalize`.
    """
    if image.dtype == np.uint8:
        image = image.astype(np.float32 )
    # take mean across the whole `image`
    mean = np.mean(image )
    std = np.std(image )
    # Guard against zero std on constant images.
    adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape ) ) )
    return normalize(image, mean=mean, std=adjusted_stddev, **kwargs )
def __lowercase( self : Optional[int], __lowerCamelCase : ImageInput, __lowerCamelCase : Optional[str] = None, __lowerCamelCase : bool = None, __lowerCamelCase : Optional[bool] = None, __lowerCamelCase : Optional[int] = None, __lowerCamelCase : Optional[Dict[str, int]] = None, __lowerCamelCase : Optional[Union[str, TensorType]] = None, __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST, **__lowerCamelCase : Union[str, Any], ) -> ImageInput:
    # NOTE(review): every parameter shares the name `__lowerCamelCase` (a
    # SyntaxError as written) and the body reads names that are never bound
    # (do_normalize, images, header_text, ...). Presumably an automated rename
    # destroyed the original `preprocess(images, header_text=..., do_normalize=...,
    # ...)` signature — restore distinct parameter names before this can run.
    """Preprocess a batch of images into flattened patches plus attention masks.

    Resolves per-call options against the processor defaults, optionally
    converts inputs to RGB, renders a VQA header onto each image, normalizes,
    extracts flattened patches, and returns a BatchFeature containing
    ``flattened_patches`` and ``attention_mask``.
    """
    # Per-call options fall back to the processor's configured defaults.
    UpperCamelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize
    UpperCamelCase__ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    UpperCamelCase__ : Any = patch_size if patch_size is not None else self.patch_size
    UpperCamelCase__ : Optional[Any] = max_patches if max_patches is not None else self.max_patches
    UpperCamelCase__ : str = self.is_vqa
    # The output layout is fixed by the patch extraction, so a caller-supplied
    # data_format cannot be honoured.
    if kwargs.get('''data_format''', __lowerCamelCase ) is not None:
        raise ValueError('''data_format is not an accepted input as the outputs are ''' )
    UpperCamelCase__ : Optional[Any] = make_list_of_images(__lowerCamelCase )
    if not valid_images(__lowerCamelCase ):
        raise ValueError(
            '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
            '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        UpperCamelCase__ : Optional[Any] = [convert_to_rgb(__lowerCamelCase ) for image in images]
    # All transformations expect numpy arrays.
    UpperCamelCase__ : Optional[int] = [to_numpy_array(__lowerCamelCase ) for image in images]
    if is_vqa:
        # VQA checkpoints render the question text as a header above the image.
        if header_text is None:
            raise ValueError('''A header text must be provided for VQA models.''' )
        UpperCamelCase__ : List[str] = kwargs.pop('''font_bytes''', __lowerCamelCase )
        UpperCamelCase__ : List[Any] = kwargs.pop('''font_path''', __lowerCamelCase )
        if isinstance(__lowerCamelCase, __lowerCamelCase ):
            UpperCamelCase__ : Optional[int] = [header_text] * len(__lowerCamelCase )
        UpperCamelCase__ : str = [
            render_header(__lowerCamelCase, header_text[i], font_bytes=__lowerCamelCase, font_path=__lowerCamelCase )
            for i, image in enumerate(__lowerCamelCase )
        ]
    if do_normalize:
        UpperCamelCase__ : List[str] = [self.normalize(image=__lowerCamelCase ) for image in images]
    # convert to torch tensor and permute
    UpperCamelCase__ : Optional[Any] = [
        self.extract_flattened_patches(image=__lowerCamelCase, max_patches=__lowerCamelCase, patch_size=__lowerCamelCase )
        for image in images
    ]
    # create attention mask in numpy
    # NOTE(review): `np.floataa` is not a NumPy attribute — presumably np.float32.
    UpperCamelCase__ : List[str] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
    UpperCamelCase__ : Tuple = BatchFeature(
        data={'''flattened_patches''': images, '''attention_mask''': attention_masks}, tensor_type=__lowerCamelCase )
    return encoded_outputs
| 344 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_SCREAMING_SNAKE_CASE : Any = """sshleifer/bart-tiny-random"""
_SCREAMING_SNAKE_CASE : List[str] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers`` (make_student).

    NOTE(review): the bodies read names that are never bound here
    (``__lowerCamelCase``, ``student``, ``self.teacher_config``) — an
    automated rename appears to have destroyed the original locals and the
    checkpoint constants. The intent (teacher checkpoint in, truncated
    student out, layer counts asserted) is recorded below; confirm against
    the original test file before relying on these tests.
    """
    @cached_property
    def __lowercase( self : str ) -> Dict:
        # Presumably loads the teacher config from one of the tiny
        # checkpoint constants defined at module level — TODO confirm.
        return AutoConfig.from_pretrained(__lowerCamelCase )
    def __lowercase( self : Optional[int] ) -> int:
        # Student built with 1 encoder and 1 decoder layer.
        UpperCamelCase__ ,*UpperCamelCase__ : Dict = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
        self.assertEqual(student.config.num_hidden_layers, 1 )
    def __lowercase( self : List[Any] ) -> Optional[Any]:
        # d=None case: only checks construction succeeds.
        UpperCamelCase__ ,*UpperCamelCase__ : Optional[Any] = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=__lowerCamelCase )
    def __lowercase( self : str ) -> List[Any]:
        # Decoder defaults to the teacher's full depth when d is None.
        UpperCamelCase__ ,*UpperCamelCase__ : str = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=__lowerCamelCase )
        self.assertEqual(student.config.encoder_layers, 1 )
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers )
    def __lowercase( self : Union[str, Any] ) -> int:
        # Explicit e=1, d=1 truncation on both sides.
        UpperCamelCase__ ,*UpperCamelCase__ : int = create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=1, d=1 )
        self.assertEqual(student.config.encoder_layers, 1 )
        self.assertEqual(student.config.decoder_layers, 1 )
    def __lowercase( self : Union[str, Any] ) -> Tuple:
        # Invalid argument combination must raise.
        with self.assertRaises(__lowerCamelCase ):
            create_student_by_copying_alternating_layers(__lowerCamelCase, tempfile.mkdtemp(), e=__lowerCamelCase, d=__lowerCamelCase )
| 344 | 1 |
def UpperCAmelCase_ ( input_a , input_b ) -> int:
    """Two-input AND gate.

    Returns 1 only when both inputs are non-zero, else 0. The obfuscated
    signature repeated one parameter name (a SyntaxError); the second
    parameter is restored under a distinct name.
    """
    # count(0) == 0 means neither input is zero.
    return int((input_a, input_b).count(0 ) == 0 )
def UpperCAmelCase_ ( ) -> None:
    """Exhaustively check the 2-input AND truth table."""
    # NOTE(review): `and_gate` is not defined in this file — the gate above
    # was renamed to `UpperCAmelCase_` by an automated pass, so these calls
    # raise NameError as written. Confirm the intended target.
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Run the self-test, then print the full truth table.
    # NOTE(review): `test_and_gate` is likewise undefined here (same rename).
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 716 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
# Map of DATASETS_VERBOSITY values to stdlib logging levels.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

# Level applied when the environment variable is unset or unrecognised.
_default_log_level = logging.WARNING

# Backward-compatible binding for the obfuscated constant name: in the
# original, the last assignment left it holding the default level.
__lowerCAmelCase = _default_log_level


def UpperCAmelCase_ ( ) -> int:
    """Return the log level requested via the DATASETS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or names
    an unknown level; an unknown value also emits a warning. Fixes from
    review: the original bound the env value to a throwaway local while
    reading ``env_level_str``, and both ``log_levels`` and
    ``_default_log_level`` were undefined (their assignments collided on one
    obfuscated name).
    """
    env_level_str = os.getenv('''DATASETS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        logging.getLogger().warning(
            F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
            F'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def UpperCAmelCase_ ( ) -> str:
    """Return the top-level package name this module belongs to."""
    # Everything before the first dot of the dotted module path.
    top_level, _sep, _rest = __name__.partition('.')
    return top_level
def UpperCAmelCase_ ( ) -> logging.Logger:
    """Return the library's root logger."""
    # NOTE(review): `_get_library_name` is not defined in this file — the
    # helper above was renamed to `UpperCAmelCase_` by an automated pass.
    return logging.getLogger(_get_library_name() )
def UpperCAmelCase_ ( ) -> None:
    # Apply our default configuration to the library root logger.
    # NOTE(review): binds a throwaway local while the next line reads
    # `library_root_logger`; the original presumably bound that name.
    __lowercase : Union[str, Any] = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase_ ( ) -> None:
    """Reset the library root logger level to NOTSET."""
    __lowercase : List[Any] = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase_ ( __lowerCAmelCase = None ) -> logging.Logger:
    """Return a logger for `name`, defaulting to the library's root name."""
    # NOTE(review): the body reads `name`, but the parameter is
    # `__lowerCAmelCase` — another casualty of the automated rename.
    if name is None:
        __lowercase : Optional[Any] = _get_library_name()
    return logging.getLogger(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> int:
    """Return the current effective level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase_ ( __lowerCAmelCase ) -> None:
    """Set the library root logger to the given level."""
    _get_library_root_logger().setLevel(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> int:
    # NOTE(review): the four wrappers below presumably passed distinct levels
    # (info/warning/debug/error). As written, `__lowerCAmelCase` resolves to
    # the module-level constant whose last assignment was logging.WARNING, so
    # all four behave identically — restore the per-function levels.
    return set_verbosity(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> List[str]:
    return set_verbosity(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> Optional[int]:
    return set_verbosity(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> Any:
    return set_verbosity(__lowerCAmelCase )
def UpperCAmelCase_ ( ) -> None:
    # NOTE(review): presumably meant to clear the global progress-bar flag;
    # as written it only binds an unused local.
    __lowercase : Tuple = False
def UpperCAmelCase_ ( ) -> None:
    # NOTE(review): same issue as above, with True.
    __lowercase : Dict = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __lowerCAmelCase :
    """Inert drop-in replacement for ``tqdm.tqdm``.

    Iterating yields the wrapped iterable's items unchanged; any other
    attribute access returns a no-op function, and the context-manager
    protocol is supported, so callers can use it exactly like a real bar.
    Fix from review: ``__init__`` bound the iterable to a throwaway local
    while ``__iter__`` read ``self._iterator`` — the attribute is now set.
    """

    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        # Keep the wrapped iterable so __iter__ can delegate to it.
        self._iterator = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self , _ ):
        """Return a function that accepts anything and does nothing."""
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , type_ , value , traceback ):
        return
# NOTE(review): presumably the global progress-bar flag (`_tqdm_active` in
# the original); the automated rename left it under a colliding name, so the
# `_tqdm_active` reads below raise NameError as written.
__lowerCAmelCase : int = True
class __lowerCAmelCase :
    """Factory returning a real tqdm bar when active, else an inert stand-in."""
    def __call__( self : List[str] , *_snake_case : Dict , _snake_case : str=False , **_snake_case : List[str] ):
        # NOTE(review): three parameters share one name (a SyntaxError as
        # written) and the body reads `disable`; the original signature was
        # presumably (*args, disable=False, **kwargs). `EmptyTqdm` is also
        # undefined here — the class above lost its name to the rename.
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*_snake_case , **_snake_case )
        else:
            return EmptyTqdm(*_snake_case , **_snake_case )
    def snake_case_ ( self : Optional[Any] , *_snake_case : str , **_snake_case : List[str] ):
        """Forward set_lock to tqdm when progress bars are active."""
        __lowercase : Dict = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*_snake_case , **_snake_case )
    def snake_case_ ( self : Tuple ):
        """Forward get_lock to tqdm when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Singleton tqdm factory used throughout the module.
__lowerCAmelCase : Tuple = _tqdm_cls()
def UpperCAmelCase_ ( ) -> bool:
    """Report whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active )


def UpperCAmelCase_ ( ) -> None:
    """Enable tqdm progress bars globally.

    Fix from review: the original declared ``global _tqdm_active`` but then
    assigned an annotated *local* under an obfuscated name, so the flag was
    never actually set. The global is now assigned.
    """
    global _tqdm_active
    _tqdm_active = True


def UpperCAmelCase_ ( ) -> None:
    """Disable tqdm progress bars globally (same fix as the enabler above)."""
    global _tqdm_active
    _tqdm_active = False
| 284 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( tensor ) ->List[int]:
    """Return the shape of `tensor` as a list, preferring static dimensions.

    `tensor` may be a tf.Tensor or np.ndarray. Dynamic (None) static
    dimensions are replaced by the corresponding entries of ``tf.shape``.
    Fixes from review: the obfuscated parameter `a` never matched the body's
    reads (`tensor`, and the final comprehension iterated the input instead
    of the static shape), and the fully-unknown-shape check compared against
    ``tf.TensorShape`` of the input rather than of ``None``.
    """
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        # Fully unknown static shape: fall back to the dynamic shape tensor.
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def __UpperCamelCase ( logits , axis=None , name=None ):
    """Softmax of `logits` along `axis` with a tiny additive shift.

    Fix from review: the obfuscated signature repeated one parameter name
    (a SyntaxError) while the body read `logits`, `axis` and `name`; the
    distinct names are restored. The 1e-9 added to the logits is kept —
    presumably a numerical/compilation workaround; it does not measurably
    change the result.
    """
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def __UpperCamelCase ( a : List[str] , a : Union[str, Any] , a : Tuple , a : List[str]=1e-5 , a : Any=-1 ) ->Dict:
    # NOTE(review): all five parameters share the name `a` (a SyntaxError as
    # written) while the body reads `inputs`, `weight`, `bias`, `axis` and
    # `epsilon`; presumably the original signature was
    # (inputs, weight, bias, epsilon=1e-5, axis=-1) — restore before use.
    # `shape_list` is also unresolved here (the helper above lost its name).
    """Functional layer normalization along `axis`."""
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a , a ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    snake_case , snake_case = tf.nn.moments(a , axes=[axis] , keepdims=a )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        snake_case = [1] * inputs.shape.rank
        snake_case = shape_list(a )[axis]
        snake_case = tf.reshape(a , a )
        snake_case = tf.reshape(a , a )
    # Compute layer normalization using the batch_normalization
    # function.
    snake_case = tf.nn.batch_normalization(
        a , a , a , offset=a , scale=a , variance_epsilon=a , )
    return outputs
def __UpperCamelCase ( input , start_dim=0 , end_dim=-1 ):
    """Flatten dimensions ``start_dim .. end_dim`` of `input`, like torch.flatten.

    Fix from review: the obfuscated signature repeated one parameter name
    (a SyntaxError) while the body read `input`, `start_dim` and `end_dim`;
    the distinct names are restored. (`input` shadows the builtin, matching
    the torch API it replicates.)
    """
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def __UpperCamelCase ( encoder_attention_mask ):
    """Invert a 2D/3D attention mask into an additive extended mask.

    Fix from review: the obfuscated parameter `a` never matched the body's
    reads of ``encoder_attention_mask``; the original name is restored.
    Positions allowed by the input mask (1) become 0, masked positions
    become the dtype's minimum, ready to be added to attention scores.
    """
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def __UpperCamelCase ( tensor , embed_dim , tensor_name="input_ids" ):
    """Assert every id in `tensor` is strictly below `embed_dim`.

    Fix from review: the obfuscated signature repeated one parameter name
    (a SyntaxError); the names read by the message (`tensor`, `embed_dim`,
    `tensor_name`) are restored. Raises through tf.debugging on violation.
    """
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ) , )
def __UpperCamelCase ( group , name , data ):
    """Save the list-of-strings attribute `data` as `name` on HDF5 `group`.

    Splits the data into numbered chunk attributes when it would exceed the
    HDF5 object-header limit. Fixes from review: the obfuscated signature
    repeated one parameter name (a SyntaxError), and the final assignments
    to ``group.attrs`` had been reduced to dead local bindings — they are
    restored so the function actually writes to the group.
    """
    # HDF5 object-header limit in bytes; larger attributes must be chunked.
    HDF5_OBJECT_HEADER_LIMIT = 6_4512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""" )
    data_array = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_array , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_array , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            # Store each chunk under a numbered attribute name "<name><i>".
            group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def __UpperCamelCase ( group , name ):
    """Load the (possibly chunked) string-list attribute `name` from `group`.

    Fix from review: the obfuscated signature repeated one parameter name
    (a SyntaxError) while the body read `group` and `name`; the distinct
    names are restored. Bytes entries are decoded as UTF-8.
    """
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        # Re-assemble chunked attributes saved as "<name>0", "<name>1", ...
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def __UpperCamelCase ( data ):
    """Expand every rank-1 tf.Tensor in a nested structure to rank 2.

    Fix from review: the inner helper's parameter was renamed to `a` while
    its body read `t`; the consistent name is restored. Non-tensor leaves
    and tensors of other ranks pass through unchanged.
    """
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 342 |
"""Convert between units of energy, using the joule as pivot unit."""

# Conversion factor from each supported unit to joules.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.6_0217_6634E-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.35_58_18,
}

# Backward-compatible alias for the obfuscated module-level name.
_lowercase = ENERGY_CONVERSION


def __UpperCamelCase ( from_type : str , to_type : str , value : float ) ->float:
    """Convert `value` from `from_type` to `to_type`.

    Fixes from review: the obfuscated signature used one repeated parameter
    name (a SyntaxError) while the body read `from_type`/`to_type`/`value`,
    and the table was bound to a name the body never used — both restored.

    Raises ValueError listing the valid units when either unit is unknown.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 342 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Any = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=DummyObject ):
    """Placeholder for note_seq-backed classes when ``note_seq`` is missing.

    Any construction or classmethod access raises an informative error via
    ``requires_backends``. Fixes from review: the metaclass referenced an
    undefined name and is restored to ``DummyObject`` (imported above), and
    each method's ``*``/``**`` parameters shared one name — a SyntaxError.
    NOTE(review): both classmethods still share the obfuscated name
    ``_UpperCAmelCase``; the second definition shadows the first.
    """

    # Backends this dummy stands in for.
    snake_case_ = ['note_seq']

    def __init__( self , *args , **kwargs ) -> Any:
        '''Raise the missing-backend error on instantiation.'''
        requires_backends(self , ['''note_seq'''] )

    @classmethod
    def _UpperCAmelCase ( cls , *args , **kwargs ) -> Tuple:
        '''Classmethod stand-in: raise the missing-backend error.'''
        requires_backends(cls , ['''note_seq'''] )

    @classmethod
    def _UpperCAmelCase ( cls , *args , **kwargs ) -> List[str]:
        '''Classmethod stand-in: raise the missing-backend error.'''
        requires_backends(cls , ['''note_seq'''] )
| 147 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Common model tests for AutoencoderKL.

    Review fixes: the base classes listed an undefined name twice and are
    restored from the imports at the top of the file; the class-level
    attribute annotations referenced undefined typing names evaluated at
    class-creation time and were dropped. NOTE(review): an automated rename
    still leaves many bodies reading names that are never bound
    (``lowerCAmelCase__``, the repeated ``_UpperCamelCase`` locals, and all
    test methods sharing the name ``snake_case__``); confirm the bodies
    against the original test suite before relying on them.
    """

    _snake_case = AutoencoderKL
    _snake_case = 'sample'
    _snake_case = 1e-2

    @property
    def snake_case__ ( self ):
        """Random 4x3x32x32 input batch on the test device."""
        _UpperCamelCase = 4
        _UpperCamelCase = 3
        _UpperCamelCase = (32, 32)
        _UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase__ )
        return {"sample": image}

    @property
    def snake_case__ ( self ):
        """Input spatial shape (channels, height, width)."""
        return (3, 32, 32)

    @property
    def snake_case__ ( self ):
        """Output spatial shape (channels, height, width)."""
        return (3, 32, 32)

    def snake_case__ ( self ):
        """Minimal AutoencoderKL config plus a matching dummy input."""
        _UpperCamelCase = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        _UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict

    def snake_case__ ( self ):
        """Intentionally skipped (mixin hook)."""
        pass

    def snake_case__ ( self ):
        """Intentionally skipped (mixin hook)."""
        pass

    @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def snake_case__ ( self ):
        """Gradient checkpointing must match the non-checkpointed gradients."""
        _UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
        _UpperCamelCase = self.model_class(**lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        assert not model.is_gradient_checkpointing and model.training
        _UpperCamelCase = model(**lowerCAmelCase__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        _UpperCamelCase = torch.randn_like(lowerCAmelCase__ )
        _UpperCamelCase = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        _UpperCamelCase = self.model_class(**lowerCAmelCase__ )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(lowerCAmelCase__ )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        _UpperCamelCase = model_a(**lowerCAmelCase__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        _UpperCamelCase = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        _UpperCamelCase = dict(model.named_parameters() )
        _UpperCamelCase = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )

    def snake_case__ ( self ):
        """from_pretrained with output_loading_info: no missing keys."""
        _UpperCamelCase , _UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowerCAmelCase__ )
        self.assertIsNotNone(lowerCAmelCase__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(lowerCAmelCase__ )
        _UpperCamelCase = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def snake_case__ ( self ):
        """Pretrained dummy VAE output matches per-device reference slices."""
        _UpperCamelCase = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
        _UpperCamelCase = model.to(lowerCAmelCase__ )
        model.eval()
        if torch_device == "mps":
            _UpperCamelCase = torch.manual_seed(0 )
        else:
            _UpperCamelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        _UpperCamelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        _UpperCamelCase = image.to(lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model(lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ , generator=lowerCAmelCase__ ).sample
        _UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            _UpperCamelCase = torch.tensor(
                [
                    -4.0_0_7_8e-0_1,
                    -3.8_3_2_3e-0_4,
                    -1.2_6_8_1e-0_1,
                    -1.1_4_6_2e-0_1,
                    2.0_0_9_5e-0_1,
                    1.0_8_9_3e-0_1,
                    -8.8_2_4_7e-0_2,
                    -3.0_3_6_1e-0_1,
                    -9.8_6_4_4e-0_3,
                ] )
        elif torch_device == "cpu":
            _UpperCamelCase = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            _UpperCamelCase = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1e-2 ) )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests for AutoencoderKL against reference slices.

    NOTE(review): an automated rename broke several bodies — test methods
    all share the name ``snake_case__`` (later definitions shadow earlier
    ones), many locals are bound to the repeated ``_UpperCamelCase`` while
    other names are read, and ``torch.floataa`` is not a torch attribute
    (presumably float16/float32). The intended behaviour is recorded in the
    docstrings below; confirm against the original test suite.
    """
    def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str:
        """Build the hub filename for a cached gaussian-noise fixture."""
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase__ ) for s in shape] )}.npy"""
    def snake_case__ ( self : List[str] ) -> Tuple:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : int=(4, 3, 512, 512) , lowerCAmelCase__ : Tuple=False ) -> Tuple:
        """Load a seeded noise image fixture, optionally in half precision."""
        # NOTE(review): both branches of this conditional pick the same
        # (nonexistent) `torch.floataa`; presumably float16 vs float32.
        _UpperCamelCase = torch.floataa if fpaa else torch.floataa
        _UpperCamelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) ).to(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        return image
    def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[Any]="CompVis/stable-diffusion-v1-4" , lowerCAmelCase__ : Dict=False ) -> str:
        """Load the SD VAE submodule, optionally the fp16 revision."""
        _UpperCamelCase = '''fp16''' if fpaa else None
        _UpperCamelCase = torch.floataa if fpaa else torch.floataa
        _UpperCamelCase = AutoencoderKL.from_pretrained(
            lowerCAmelCase__ , subfolder='''vae''' , torch_dtype=lowerCAmelCase__ , revision=lowerCAmelCase__ , )
        model.to(lowerCAmelCase__ ).eval()
        return model
    def snake_case__ ( self : int , lowerCAmelCase__ : Tuple=0 ) -> List[Any]:
        """Seeded torch.Generator on the test device (MPS needs manual_seed)."""
        if torch_device == "mps":
            return torch.manual_seed(lowerCAmelCase__ )
        return torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def snake_case__ ( self : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Tuple:
        """Stochastic forward pass matches the per-seed reference slice."""
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ )
        _UpperCamelCase = self.get_generator(lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model(lowerCAmelCase__ , generator=lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ ).sample
        assert sample.shape == image.shape
        _UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=3e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case__ ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
        """Same stochastic forward pass in fp16 on GPU."""
        _UpperCamelCase = self.get_sd_vae_model(fpaa=lowerCAmelCase__ )
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
        _UpperCamelCase = self.get_generator(lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model(lowerCAmelCase__ , generator=lowerCAmelCase__ , sample_posterior=lowerCAmelCase__ ).sample
        assert sample.shape == image.shape
        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(lowerCAmelCase__ )
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> Any:
        """Deterministic (mode) forward pass matches the reference slice."""
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model(lowerCAmelCase__ ).sample
        assert sample.shape == image.shape
        _UpperCamelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=3e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case__ ( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Tuple:
        """Decoder output on a latent fixture matches the reference slice."""
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
        _UpperCamelCase = torch.tensor(lowerCAmelCase__ )
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int ) -> Optional[int]:
        """fp16 decoder output matches the reference slice (looser atol)."""
        _UpperCamelCase = self.get_sd_vae_model(fpaa=lowerCAmelCase__ )
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        _UpperCamelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        _UpperCamelCase = torch.tensor(lowerCAmelCase__ )
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=5e-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def snake_case__ ( self : Tuple , lowerCAmelCase__ : Optional[int] ) -> Any:
        """xformers attention decode agrees with the default path (fp16)."""
        _UpperCamelCase = self.get_sd_vae_model(fpaa=lowerCAmelCase__ )
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
        """xformers attention decode agrees with the default path (fp32)."""
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            _UpperCamelCase = model.decode(lowerCAmelCase__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
        """Encoder latent-dist sample matches the per-seed reference slice."""
        _UpperCamelCase = self.get_sd_vae_model()
        _UpperCamelCase = self.get_sd_image(lowerCAmelCase__ )
        _UpperCamelCase = self.get_generator(lowerCAmelCase__ )
        with torch.no_grad():
            _UpperCamelCase = model.encode(lowerCAmelCase__ ).latent_dist
            _UpperCamelCase = dist.sample(generator=lowerCAmelCase__ )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        _UpperCamelCase = sample[0, -1, -3:, -3:].flatten().cpu()
        _UpperCamelCase = torch.tensor(lowerCAmelCase__ )
        # MPS needs a looser tolerance than CPU/GPU here.
        _UpperCamelCase = 3e-3 if torch_device != '''mps''' else 1e-2
        assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=lowerCAmelCase__ )
| 98 |
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of `sequence` using backtracking.

    Review fixes: both functions and the call sites had lost their names to
    an automated rename (the recursive helper's four parameters shared one
    name, a SyntaxError, and the script called undefined names). The names
    read by the bodies and call sites are restored.
    """
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursive backtracking helper: prints each completed permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice before trying the next candidate.
            current_sequence.pop()
            index_used[i] = False


# Backward-compatible binding for the obfuscated name: in the original, the
# last definition (the recursive helper) won.
_lowerCAmelCase = create_state_space_tree

_lowerCamelCase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(_lowerCamelCase)

_lowerCamelCase = ["A", "B", "C"]
generate_all_permutations(_lowerCamelCase)
| 121 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A_ = logging.get_logger(__name__)
# Map of checkpoint name -> config URL for pretrained Marian models.
# NOTE(review): both module-level names were mangled to `A_`, so the dict
# below shadows the logger; originally these were two distinct names
# (the module logger and the pretrained-config archive map).
A_ = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase__ ( a ):
    """Configuration for Marian encoder-decoder translation models.

    Defaults reproduce the ``Helsinki-NLP/opus-mt-en-de`` checkpoint.

    NOTE(review): the original block was machine-mangled — every ``__init__``
    parameter was named ``SCREAMING_SNAKE_CASE`` (duplicate argument names, a
    SyntaxError) and each ``self.<attr>`` assignment had collapsed into a no-op
    annotated local. Parameter/attribute names are reconstructed from the
    assignment order and the documented Marian configuration.
    """

    model_type = '''marian'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Marian may use a separate decoder vocabulary; fall back to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class UpperCamelCase__ ( a ):
    '''ONNX export configuration for Marian (seq2seq with optional KV-cache).

    NOTE(review): every method name was mangled to ``snake_case``, every local
    to ``__lowerCAmelCase`` and every argument to ``SCREAMING_SNAKE_CASE``, so
    duplicate-parameter signatures and same-named locals below do not run as
    written; code is left byte-identical and only annotated. Structure mirrors
    BartOnnxConfig (inputs/outputs axis maps, dummy-input generation with and
    without past_key_values).
    '''

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        # Declares the dynamic-axis layout of every ONNX input, depending on task
        # and on whether the exported graph carries a KV cache (self.use_past).
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase : Tuple = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # with a cache the decoder feeds one token at a time
                __lowerCAmelCase : Tuple = {0: 'batch'}
                __lowerCAmelCase : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                __lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
                __lowerCAmelCase : Dict = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __lowerCAmelCase : int = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                __lowerCAmelCase , __lowerCAmelCase : str = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE ):
                    __lowerCAmelCase : str = {0: 'batch', 2: 'past_sequence + sequence'}
                    __lowerCAmelCase : List[str] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            __lowerCAmelCase : List[str] = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        # Output axis maps; for causal-lm with a cache, "present" tensors grow
        # along axis 2 (past_sequence + sequence).
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase : Union[str, Any] = super().outputs
        else:
            __lowerCAmelCase : str = super(SCREAMING_SNAKE_CASE , self ).outputs
            if self.use_past:
                __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.num_layers
                for i in range(SCREAMING_SNAKE_CASE ):
                    __lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
                    __lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
        # Dummy inputs for the default/seq2seq-lm export: encoder inputs plus
        # decoder inputs (decoder seq length 1 when a KV cache is present),
        # optionally followed by zero-filled past_key_values tensors.
        __lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Generate decoder inputs
        __lowerCAmelCase : Union[str, Any] = seq_length if not self.use_past else 1
        __lowerCAmelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        __lowerCAmelCase : int = dict(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            __lowerCAmelCase , __lowerCAmelCase : Tuple = common_inputs['input_ids'].shape
            __lowerCAmelCase : List[str] = common_inputs['decoder_input_ids'].shape[1]
            __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.num_attention_heads
            # per-layer cache tensor shapes: (batch, heads, seq, head_dim)
            __lowerCAmelCase : int = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __lowerCAmelCase : Union[str, Any] = decoder_seq_length + 3
            __lowerCAmelCase : List[Any] = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # extend the decoder mask to cover the (zero-filled) past positions
            __lowerCAmelCase : Union[str, Any] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )] , dim=1 )
            __lowerCAmelCase : Tuple = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __lowerCAmelCase , __lowerCAmelCase : Tuple = self.num_layers
            __lowerCAmelCase : Any = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - min_num_layers
            __lowerCAmelCase : List[Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(SCREAMING_SNAKE_CASE ):
                # layers present on both sides get (dec_key, dec_value, enc_key, enc_value)
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(SCREAMING_SNAKE_CASE ),
                        torch.zeros(SCREAMING_SNAKE_CASE ),
                        torch.zeros(SCREAMING_SNAKE_CASE ),
                        torch.zeros(SCREAMING_SNAKE_CASE ),
                    ) )
            # TODO: test this.
            __lowerCAmelCase : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) )
        return common_inputs

    def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
        # Dummy inputs for the causal-lm export: encoder-style inputs plus an
        # optional decoder-only KV cache.
        __lowerCAmelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            __lowerCAmelCase , __lowerCAmelCase : List[str] = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            __lowerCAmelCase : Tuple = seqlen + 2
            __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.num_layers
            __lowerCAmelCase , __lowerCAmelCase : Any = self.num_attention_heads
            __lowerCAmelCase : List[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # keep the mask dtype consistent when concatenating the past portion
            __lowerCAmelCase : Tuple = common_inputs['attention_mask'].dtype
            __lowerCAmelCase : Dict = torch.cat(
                [common_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
            __lowerCAmelCase : Optional[int] = [
                (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(SCREAMING_SNAKE_CASE )
            ]
        return common_inputs

    def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
        # Tokenizer-driven dummy text inputs shared by the encoder and decoder paths.
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        __lowerCAmelCase : Optional[int] = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __lowerCAmelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
            SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE )
        # Generate dummy inputs according to compute batch and sequence
        __lowerCAmelCase : Dict = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        __lowerCAmelCase : int = dict(tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) )
        return common_inputs

    def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
        # Dispatch to the task-appropriate dummy-input generator.
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
        else:
            __lowerCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm(
                SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
        return common_inputs

    def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        # Flatten past_key_values using the seq2seq layout when applicable,
        # otherwise the decoder-only (OnnxConfigWithPast) layout.
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase : Union[str, Any] = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            __lowerCAmelCase : Optional[int] = super(SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    @property
    def snake_case ( self ) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4
| 123 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, by trial division up to sqrt(number).

    NOTE(review): the mangled original named every function ``A``; the name is
    restored to match the ``is_prime`` call sites elsewhere in this file.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes from 2 up to ``n`` inclusive.

    Non-primes are zeroed in place in ``begin_list`` and filtered out at the end.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list[int]:
    """Return all primes between 2 and ``n`` (inclusive), using ``is_prime``."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list[int]:
    """Return the prime factorisation of ``number`` in ascending order.

    0 and 1 factorise to themselves; primes factorise to ``[number]``.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # integer division (the original used '/=' which silently
                # promoted 'quotient' to float)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    """Return two primes whose sum equals the even ``number`` > 2 (Goldbach)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative integers (Euclid).

    NOTE(review): the mangled original collapsed both parameters to ``numbera``
    and looped on the wrong variable; the standard Euclidean remainder loop is
    restored here.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple ("kgV") of two positive integers.

    Built by merging the prime factorisations of both arguments, taking each
    shared prime at its maximum multiplicity.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared prime: take the higher multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime, counting from ``get_prime(0) == 2``."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """Return all primes strictly between the primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list[int]:
    """Return all divisors of ``n`` (including 1 and ``n``) in ascending order."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Return ``(numerator, denominator)`` reduced to lowest terms via their gcd."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for n >= 0 (0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number of the sequence 1, 1, 2, 3, 5, 8, ...

    ``fib(0) == fib(1) == 1``; computed iteratively with two running values.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
| 123 | 1 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n`` (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            # i divides n: record it and strip every power of i from n
            n //= i
            factors.add(i)
    if n > 1:
        # remaining n is itself a prime factor
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoised count of the distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Return True iff every element of ``iterable`` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Return the first group of ``n`` consecutive integers that each have
    exactly ``n`` distinct prime factors (Project Euler 47 search loop)."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first of ``n`` consecutive integers with ``n`` distinct
    prime factors each (Project Euler problem 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
| 459 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost to travel on every day in ``days``.

    ``costs`` holds the prices of a 1-day, 7-day and 30-day pass, in that
    order. Solved top-down with a memoised DP over calendar days 1..365.

    Raises ValueError for malformed ``days``/``costs`` or out-of-range days.

    NOTE(review): the mangled original gave both parameters the same name
    (a SyntaxError); the signature is reconstructed from the validation
    messages, which name the parameters ``days`` and ``costs``.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # past the calendar: no further cost
        if index > 365:
            return 0

        # no travel on this day: skip ahead for free
        if index not in days_set:
            return dynamic_programming(index + 1)

        # best of buying a 1-, 7- or 30-day pass starting today
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 459 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : int , snake_case : int , snake_case : List[Any]=2 , snake_case : List[str]=8 , snake_case : str=True , snake_case : Tuple=True , snake_case : Any=True , snake_case : Any=True , snake_case : Optional[Any]=99 , snake_case : Dict=16 , snake_case : Union[str, Any]=5 , snake_case : str=2 , snake_case : int=36 , snake_case : str="gelu" , snake_case : Optional[int]=0.0 , snake_case : int=0.0 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : List[Any]=2 , snake_case : Dict=0.02 , snake_case : Any=3 , snake_case : Dict=4 , snake_case : Optional[int]=None , ) -> List[Any]:
    '''Store the tester hyper-parameters used to build tiny MRA configs/inputs.

    NOTE(review): parameter names were all mangled to ``snake_case`` (duplicate
    argument names) and each ``self.<attr>`` assignment collapsed to a no-op
    local annotation; the right-hand sides preserve the original parameter
    names (parent, batch_size, seq_length, ... scope).
    '''
    __UpperCAmelCase : List[str] = parent
    __UpperCAmelCase : int = batch_size
    __UpperCAmelCase : Union[str, Any] = seq_length
    __UpperCAmelCase : Optional[Any] = is_training
    __UpperCAmelCase : Union[str, Any] = use_input_mask
    __UpperCAmelCase : int = use_token_type_ids
    __UpperCAmelCase : Dict = use_labels
    __UpperCAmelCase : Optional[Any] = vocab_size
    __UpperCAmelCase : int = hidden_size
    __UpperCAmelCase : Dict = num_hidden_layers
    __UpperCAmelCase : Optional[int] = num_attention_heads
    __UpperCAmelCase : Tuple = intermediate_size
    __UpperCAmelCase : Dict = hidden_act
    __UpperCAmelCase : Any = hidden_dropout_prob
    __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
    __UpperCAmelCase : Tuple = max_position_embeddings
    __UpperCAmelCase : Tuple = type_vocab_size
    __UpperCAmelCase : List[Any] = type_sequence_label_size
    __UpperCAmelCase : Tuple = initializer_range
    __UpperCAmelCase : Any = num_labels
    __UpperCAmelCase : Union[str, Any] = num_choices
    __UpperCAmelCase : List[str] = scope
def lowerCamelCase__ ( self : int ) -> Optional[int]:
    '''Build a config plus random input ids, masks and label tensors for tests.

    NOTE(review): each mangled ``__UpperCAmelCase`` binding was originally the
    variable named in the final return tuple.
    '''
    __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

    __UpperCAmelCase : List[Any] = None
    if self.use_input_mask:
        __UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )

    __UpperCAmelCase : str = None
    if self.use_token_type_ids:
        __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

    __UpperCAmelCase : Optional[Any] = None
    __UpperCAmelCase : int = None
    __UpperCAmelCase : Dict = None
    if self.use_labels:
        __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )

    __UpperCAmelCase : str = self.get_config()

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
    # Build a small MraConfig from the tester's stored hyper-parameters.
    # NOTE(review): `is_decoder=_a` — the original keyword value was mangled.
    return MraConfig(
        vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
    # Variant config (pattern suggests a pipeline config with vocab_size
    # overridden to 300 — the attribute assignment was mangled away; the
    # returned name `config` no longer matches a local. TODO confirm intent).
    __UpperCAmelCase : Union[str, Any] = self.get_config()
    __UpperCAmelCase : Union[str, Any] = 300
    return config
def lowerCamelCase__ ( self : str ) -> List[str]:
    '''Extend prepare_config_and_inputs with encoder hidden states and an
    encoder attention mask, for decoder/cross-attention test paths.

    NOTE(review): the parenthesised tuple target below was originally a
    seven-way unpack into the names listed in the return tuple.
    '''
    (
        (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) , (
            __UpperCAmelCase
        ) ,
    ) : Union[str, Any] = self.prepare_config_and_inputs()

    __UpperCAmelCase : Optional[Any] = True
    __UpperCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
    __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

    return (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    )
def lowerCamelCase__ ( self : List[str] , snake_case : Any , snake_case : Tuple , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : str , snake_case : Optional[int] , snake_case : Optional[Any] ) -> List[Any]:
    # Forward the base MraModel three ways (full kwargs, ids+token types,
    # ids only) and check the last_hidden_state shape.
    # NOTE(review): `_a` stands in for several distinct mangled arguments.
    __UpperCAmelCase : Optional[int] = MraModel(config=_a )
    model.to(_a )
    model.eval()
    __UpperCAmelCase : Tuple = model(_a , attention_mask=_a , token_type_ids=_a )
    __UpperCAmelCase : Tuple = model(_a , token_type_ids=_a )
    __UpperCAmelCase : Optional[int] = model(_a )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Tuple , snake_case : str , snake_case : Optional[int] , snake_case : Any , snake_case : Any , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : int , ) -> Tuple:
    # Run MraModel as a decoder with encoder_hidden_states (with and without
    # an encoder attention mask) and check the output shape.
    __UpperCAmelCase : List[Any] = True
    __UpperCAmelCase : List[str] = MraModel(_a )
    model.to(_a )
    model.eval()
    __UpperCAmelCase : Dict = model(
        _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
    __UpperCAmelCase : List[Any] = model(
        _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
    __UpperCAmelCase : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Dict , snake_case : int , snake_case : List[Any] , snake_case : Tuple , snake_case : Tuple , snake_case : Tuple , snake_case : str , snake_case : Dict ) -> Optional[int]:
__UpperCAmelCase : Dict = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : int , snake_case : List[Any] , snake_case : str ) -> Dict:
__UpperCAmelCase : List[Any] = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Dict , snake_case : Dict , snake_case : List[str] , snake_case : str , snake_case : str , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict ) -> Optional[int]:
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Tuple = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
__UpperCAmelCase : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Any , snake_case : str , snake_case : str , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Dict:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : List[Any] = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
__UpperCAmelCase : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Any , snake_case : Tuple , snake_case : List[str] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : int , snake_case : str , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.num_choices
__UpperCAmelCase : str = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__UpperCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """ModelTesterMixin-based test suite for the MRA model family.

    NOTE(review): in the original every class attribute was assigned to the
    same name (`SCREAMING_SNAKE_CASE`, so each overwrote the previous) and every
    method was named `lowerCamelCase__` (so only the last survived and unittest
    discovered no tests).  Attribute/method names below are the ones
    ModelTesterMixin and unittest actually look up; the mapping of the four
    boolean flags follows the upstream MRA test -- confirm against the mixin.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not return attention weights (see skip below)
    all_generative_model_classes = ()

    def setUp(self):
        """Instantiate the shared model tester and the config tester."""
        self.model_tester = MraModelTester(self)
        # MraConfig is expected to be imported at file level -- TODO confirm
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple; the original
            # lost this assignment target (`__UpperCAmelCase = type`)
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='''MRA does not output attentions''')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that run real pretrained MRA checkpoints.

    NOTE(review): this class was also named `a`, shadowing the preceding test
    class at module level, and its three methods all shared one name (only the
    last was kept).  Class and methods renamed so unittest discovers all three.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''')
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCAmelCase :str = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__UpperCAmelCase :Tuple = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__UpperCAmelCase :List[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    """chrF / chrF++ metric backed by sacrebleu's CHRF implementation.

    NOTE(review): both methods were named `lowerCamelCase__` in the original, so
    the second definition shadowed the first; `datasets.Metric` dispatches to
    `_info` and `_compute`, restored below.  The module-level constants
    `_CITATION`/`_DESCRIPTION`/`_KWARGS_DESCRIPTION` referenced here were also
    mis-renamed at module level (`__UpperCAmelCase`) -- fix those separately.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
            raise ImportWarning(
                '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
                '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
                } ),
            codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''],
            reference_urls=[
                '''https://github.com/m-popovic/chrF''',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """Score `predictions` against `references`; returns the chrF score dict.

        Each prediction must have the same number of references; references are
        transposed to sacrebleu's expected layout before scoring.
        """
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        # transpose [pred][ref] -> [ref position][pred] as sacrebleu expects
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing )
        output = sb_chrf.corpus_score(predictions, transformed_references )
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Any = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
    """Test suite for PNDMScheduler.

    NOTE(review): in the original the base class was the undefined name
    `lowercase_` (restored to the imported SchedulerCommonTest), both class
    attributes were assigned to the same name `_UpperCAmelCase` (the second
    overwrote the first; the base class reads `scheduler_classes` and
    `forward_default_kwargs`), and every method was named `_lowerCAmelCase`
    (only the last survived).  Method/local names restored so unittest and the
    common-test machinery find them again.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default PNDM config; overrides are merged from **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Round-trip a scheduler through save_config/from_pretrained and compare step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-trip is exercised by check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same save/load round-trip as check_over_configs but with default scheduler config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete PRK+PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() erred when inference steps were a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # stepping PLMS before any PRK steps must raise (no past residuals yet);
        # the original asserted on the undefined name `a` -- ValueError assumed here, TODO confirm
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # we specify a different beta so the first alpha is not the default
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__magic_name__ : Tuple = logging.getLogger(__name__)
__magic_name__ : List[str] = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
    """Arguments identifying which pretrained model to fine-tune.

    NOTE(review): the original class was named `__SCREAMING_SNAKE_CASE` while
    the script instantiates `STModelArguments(model_name_or_path=...)`, and both
    fields shared the name `UpperCAmelCase__` (the second overwrote the first);
    `default=__UpperCamelCase` was an undefined name, restored to None.
    """

    # checkpoint path or hub model id (keyword used by the script's caller)
    model_name_or_path: str = dataclasses.field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
    # local cache directory for downloaded weights; None = default HF cache
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class STDataArguments:
    """Arguments describing the training / inference / validation data files.

    NOTE(review): field names restored from the script's usage
    (`STDataArguments(train_file=..., infer_file=...)`, `args.eval_file`);
    `task_name`/`label_list` are inferred from the help strings -- TODO confirm.
    """

    # required: labeled training data
    train_file: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    # required: unlabeled data to pseudo-label
    infer_file: str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={'''help''': '''The name of the task to train on.'''} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop.

    NOTE(review): all fields shared the name `UpperCAmelCase__` in the original
    (each overwrote the last) and some defaults were the undefined name
    `__UpperCamelCase`.  Names are restored from the script's attribute usage
    (`args.evaluation_strategy`, `args.do_filter_by_confidence`,
    `args.do_filter_by_val_performance`, `args.confidence_threshold`,
    `args.seed`); names not referenced in the visible code
    (`do_finetune_on_labeled_data`, `max_selftrain_iterations`) are inferred
    from the help strings -- TODO confirm.  The duplicated help string on
    `max_selftrain_iterations` is kept verbatim (fixing it needs upstream text).
    """

    output_dir: str = dataclasses.field(
        metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
    eval_metric: Optional[str] = dataclasses.field(
        default='''accuracy''', metadata={'''help''': '''The evaluation metric used for the task.'''} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default='''no''', metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
    do_finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={'''help''': '''Random seed for initialization.'''} , )
def snake_case_ ( args, infer_input, infer_output, eval_result, idalabel, output_dir ):
    """Build and persist pseudo-labeled training data for the next self-training iteration.

    NOTE(review): the original declared six parameters all named
    `SCREAMING_SNAKE_CASE__` (a SyntaxError) while the body referenced
    `infer_input`, `infer_output`, `args`, `eval_result`, `idalabel` and
    `output_dir` -- those are restored as the parameter names.  The function
    name is kept because call sites are outside this view; note a second
    top-level function with the same name follows and shadows this one --
    rename both together once call sites are visible.
    """
    # join the unlabeled examples with the model's predictions column-wise
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        # keep only predictions above the configured confidence threshold
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # keep the top eval_result fraction of most-confident predictions
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )

    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    # map class indices back to label names
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(output_dir , f'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def snake_case_(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream classification task.

    Args:
        model_name_or_path: Path or hub identifier of the pre-trained model.
        train_file: csv/json file with the labeled training data.
        infer_file: csv/json file with the unlabeled data to pseudo-label.
        output_dir: Directory where checkpoints and predictions are written.
        **kwargs: Extra key/value overrides for the merged argument namespace
            and additional arguments forwarded to ``finetune``.

    NOTE(review): parameter and local names reconstructed — the source used a
    single placeholder name for every parameter (a SyntaxError). Names such as
    STModelArguments, finetune, MODEL_BIN_FILE, logger are module-level names
    defined elsewhere in this file; confirm against upstream.
    """
    # Initialize the accelerator; it handles device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # Merge the model/data/training argument dataclasses into one flat namespace.
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    # kwargs may only override keys that already exist in the namespace.
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    # All provided data files must share one extension, csv or json.
    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        # MODEL_BIN_FILE is presumably a module-level constant naming the
        # serialized model file — TODO confirm against upstream.
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f'''train_pseudo.{args.data_file_extension}''')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                # Early stopping: require improvement above the threshold to
                # reset the patience counter; ties move the "best" pointer but
                # still count against patience.
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 368 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array of *input_str*.

    z_result[i] is the length of the longest substring starting at position i
    that is also a prefix of the whole string; index 0 is left at 0 by
    convention. Runs in O(n) via the classic two-pointer Z-box technique.

    The extension condition that the original delegated to the module-level
    ``go_next`` helper is inlined here, making this function self-contained.
    """
    z_result = [0 for i in range(len(input_str))]
    # [left_pointer, right_pointer] bounds the rightmost prefix-match
    # ("Z-box") discovered so far.
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval: reuse previously
        # computed information, capped at the interval's right edge
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        # extend the match character by character past the known region
        while (
            i + z_result[i] < len(input_str)
            and input_str[z_result[i]] == input_str[i + z_result[i]]
        ):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the prefix-match at position *i* can be extended.

    True iff the next compared position is in bounds and the candidate
    characters (one at offset z_result[i] from the start, one at the same
    offset from i) still agree.
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def snake_case_(pattern: str, input_str: str) -> int:
    """Count the occurrences of *pattern* inside *input_str*.

    Concatenates pattern + input_str and counts Z-array entries of length at
    least len(pattern): each such entry marks a position whose suffix starts
    with the full pattern. Relies on the module-level ``z_function``.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
| 368 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer-Swin backbone.

    NOTE(review): the base classes are restored from this file's imports
    (``BackboneConfigMixin``, ``PretrainedConfig``) — the source listed the
    same placeholder base twice, which raises TypeError. Parameter names in
    ``__init__`` are reconstructed from the attribute assignments; the source
    declared every parameter with the same placeholder name (a SyntaxError).
    """

    # Identifier used by the HF auto-config machinery.
    model_type = """maskformer-swin"""
    # Maps generic config attribute names onto this model's field names.
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # NOTE: mutable defaults kept for interface compatibility
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
def perfect(number: int) -> bool:
    """Return True if *number* is a perfect number.

    A perfect number equals the sum of its proper divisors (divisors
    excluding the number itself), e.g. 6 = 1 + 2 + 3.

    The function name is restored to ``perfect`` to match the call in the
    __main__ block below; the source's placeholder parameter left ``number``
    undefined.
    """
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 167 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_(args):
    """argparse factory: build a DownloadCommand from parsed CLI arguments.

    Expects *args* to carry the attributes registered by
    ``DownloadCommand.register_subcommand`` (model, cache_dir, force,
    trust_remote_code).
    """
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    """CLI command that pre-downloads a model and its tokenizer into the cache.

    NOTE(review): class name restored from the factory call above
    (``return DownloadCommand(...)``); the base class is restored from this
    file's import of ``BaseTransformersCLICommand``. The two colliding
    placeholder method names are restored to the CLI-command contract
    (``register_subcommand`` / ``run``).
    """

    @staticmethod
    def register_subcommand(parser):
        """Attach the `download` sub-parser to the root CLI *parser*."""
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        # snake_case_ is the module-level factory defined above this class.
        download_parser.set_defaults(func=snake_case_)

    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download model weights and tokenizer files into the local cache."""
        # Imported lazily so the CLI stays fast when this command isn't used.
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : Dict = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig(PretrainedConfig):
    """Configuration for MobileNetV1 models.

    NOTE(review): the base class is restored from this file's import of
    ``PretrainedConfig``; the class is renamed because the source gave this
    class and the ONNX config below the same placeholder name (the second
    definition shadowed the first). ``__init__`` parameter names are
    reconstructed from the attribute assignments — the source declared every
    parameter with the same placeholder name (a SyntaxError).
    """

    model_type = '''mobilenet_v1'''

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would collapse every layer to zero width.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): the base class is restored from this file's import of
    ``OnnxConfig``, and the three colliding placeholder property names are
    restored to the OnnxConfig contract (``inputs`` / ``outputs`` /
    ``atol_for_validation``) — as written, the later definitions shadowed the
    earlier ones.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        """Input tensor names with their dynamic axes."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        """Output tensor names, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 368 | 1 |
import math
def is_prime(number: int) -> bool:
    """Primality test by trial division over odd candidates up to sqrt(number).

    Raises AssertionError for non-int or negative input. The function name is
    restored from the call sites in ``next_prime`` below — the source gave
    both functions the same placeholder name, so the second shadowed the
    first.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime strictly after ``factor * value``.

    Scans upward (or downward when ``desc=True`` is passed) for a prime; if
    the starting value is itself prime, recurses from the next integer so the
    result is always a *different* prime.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 669 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for StableDiffusionControlNetImg2ImgPipeline using a tiny model stack.

    NOTE(review): the class name, mixin bases, class attributes and method
    names are reconstructed from the mixin contracts (``pipeline_class``,
    ``get_dummy_components``, ``get_dummy_inputs``, ``_test_*`` helpers) —
    the source used colliding placeholder names and listed the same base
    class three times. Confirm base ordering against upstream diffusers.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a deterministic tiny UNet/ControlNet/VAE/CLIP component dict."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/control-image inputs for *device*."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uint8 — the source's `np.uinta` does not exist in numpy.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase
):
    """Fast CPU tests for img2img with a MultiControlNetModel (two ControlNets).

    NOTE(review): class name, mixin bases and member names reconstructed —
    the source used colliding placeholder names and listed the same base
    twice; local names ``controlnet1``/``controlnet2`` and the ``np.uint8`` /
    ``torch.nn.Conv2d`` spellings replace the source's digit-mangled
    placeholders. Confirm base ordering against upstream diffusers.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny components with two differently-initialized ControlNets."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the ControlNet output convolutions non-zero weights so the
            # two nets actually influence the result in the guidance tests.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """GPU integration test against real checkpoints and a reference output.

    NOTE(review): class/method names reconstructed — the source named every
    method with the same placeholder. Assignment of the two downloaded images
    follows source order: the canny map is the control image, the bird photo
    is the init image; confirm against upstream diffusers.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 131 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
__UpperCamelCase : int = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__UpperCamelCase : Union[str, Any] = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, 
ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__UpperCamelCase : Tuple = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Exact-match metric: percentage of predictions that equal their reference
    after the optional regex / case / punctuation / number normalizations.

    NOTE(review): method names restored to the ``datasets.Metric`` contract
    (``_info`` / ``_compute``) — the source defined both with one colliding
    placeholder name — and ``_compute``'s parameters are reconstructed from
    the body (the source declared them all with the same name, a SyntaxError).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip ignored patterns BEFORE the case/punctuation/number options,
        # per the documented behavior.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repmap = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        if ignore_numbers:
            repmap = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

# Flat configuration record matching the fields expected by both the authors'
# AbsSummarizer and the re-implemented BertAbsSummarizer.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture, then check that the outputs of
    the original and the converted models are identical.

    Args:
        path_to_checkpoints: path to the authors' ``torch.save``-d checkpoint.
        dump_path: output folder passed on the CLI (kept for interface
            compatibility; the state dict is written to a fixed path below).

    Raises:
        ValueError: if the converted model's outputs diverge from the original.
    """
    # Instantiate both models with the configuration of the released checkpoint.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    # map_location lambda keeps everything on CPU regardless of where it was saved.
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs: pad both sequences to the model's max length (512)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
"""Lazy-import bootstrap for the Funnel Transformer model family.

Heavy submodules (tokenizers / torch / tf backends) are only imported on
first attribute access via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Each optional backend contributes entries only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Surface areas of 3-D solids and areas of 2-D shapes.

All functions validate that their inputs are non-negative and raise
``ValueError`` otherwise.
"""
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: 6 * a**2."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: 2(lb + bh + lh)."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4 * pi * r**2."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved + flat disc): 3 * pi * r**2."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a right circular cone: pi * r * (r + slant_height)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum (lateral surface + both discs)."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder: 2 * pi * r * (h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: 4 * pi**2 * R * r.

    Requires R >= r; spindle and self-intersecting tori are rejected.
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Area of a trapezium: (b1 + b2) * h / 2."""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse from its two semi-axes."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals: d1 * d2 / 2."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with ``sides`` sides of length ``length``."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` left by ``shift_amount`` bits (append zeros).

    Returns the result as a ``"0b..."`` binary string.
    Raises ValueError for negative inputs.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits (drop low bits, fill with zeros).

    Returns ``"0b0"`` when every significant bit is shifted out.
    Raises ValueError for negative inputs.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Arithmetic right shift: the sign bit is replicated into vacated positions.

    Negative numbers are represented in two's complement before shifting.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        # Everything shifted out: result is all copies of the sign bit.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Lazy-import bootstrap for the GLPN model.

Vision (feature extractor / image processor) and torch (modeling) submodules
are only imported on first attribute access via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> list of public names it provides; consumed by _LazyModule.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
class A__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ):
a__, a__ : str = text, pattern
a__, a__ : Dict = len(__UpperCAmelCase ), len(__UpperCAmelCase )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _UpperCamelCase( self : int , lowerCamelCase__ : List[str] ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCamelCase( self : Optional[Any] ):
a__ : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
a__ : List[Any] = self.mismatch_in_text(__UpperCAmelCase )
if mismatch_index == -1:
positions.append(__UpperCAmelCase )
else:
a__ : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
a__ : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
UpperCamelCase : List[str] = 'ABAABA'
UpperCamelCase : List[str] = 'AB'
UpperCamelCase : Any = BoyerMooreSearch(text, pattern)
UpperCamelCase : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 37 |
"""Tests for the Marian (opus-mt) tokenizer."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Pick the tensor framework used by tests that request framework tensors.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Build a tiny vocabulary plus the fixture sentencepiece model in tmpdir.
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # Sequences are padded with id 58100 / mask 0 up to length 102; the
        # padding runs are written multiplicatively for readability (the
        # resulting values are identical to the fully expanded literals).
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [58100] * 71,
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [58100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
"""Distributed retriever for RAG built on ``torch.distributed``.

Only the main (rank-0) worker hosts the index; queries from all workers are
gathered to it, retrieved once, and the results scattered back.
"""
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """RagRetriever that serves retrieval from the rank-0 worker of a gloo group."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            # Index initialization is deferred to init_retrieval(), which needs the port.
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Initialize the retrieval process group and load the index on rank 0."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        # Rank 0 of the retrieval process group owns the index.
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.floataa if hasattr(torch, "floataa") else torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """Retrieve ``n_docs`` documents per query, coordinating across workers.

        Returns ``(retrieved_doc_embeds, doc_ids, doc_dicts)`` as in the
        single-process retriever.
        """
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
"""Affine-rotation demo: rotate a grayscale image between point triples."""
from pathlib import Path

import cv2  # NOTE(review): source had a mangled `import cva`; cv2 is the real module
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp ``img`` with the affine transform mapping the three points ``pt1``
    onto the three points ``pt2``, producing a ``rows`` x ``cols`` image."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if the decimal digits of *num* read the same both ways."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return *num* plus the number formed by reversing its digits."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    """Project Euler 55: count Lychrel candidates below *limit*.

    A number is considered Lychrel here if it does not produce a palindrome
    within fifty reverse-and-add iterations.

    Fix: the three functions were all defined under the same placeholder name
    (each shadowing the previous) while the bodies called `sum_reverse`,
    `is_palindrome` and appended to an unbound `lychrel_nums`.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # while-else: no palindrome was found within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 251 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fix: the tokenizer class below calls `logger.error`/`logger.warning`, but the
# logger was bound to a clobbered placeholder name.
logger = logging.get_logger(__name__)
# Fix: all four module constants were bound to the same placeholder name, so
# each assignment clobbered the previous one and the tokenizer class below
# referenced names that did not exist.

# Resource-file names used by `save_vocabulary` / `from_pretrained`.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download locations of the pretrained "ctrl" vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum input length supported by the pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL control-code name -> vocabulary id.
CONTROL_CODES = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (variable-length strings), e.g. a tuple of
    characters or partially merged BPE units.

    Fix: the function (and its locals) had been renamed to placeholders, so
    `pairs.add(...)` referenced an unbound name and the tokenizer's call to
    `get_pairs` resolved to nothing.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizer):
    """CTRL tokenizer based on byte-pair encoding (BPE).

    Fixes applied:
    - base class restored to the imported `PreTrainedTokenizer` (the previous
      base name was undefined in this module);
    - the four class attributes were all named `_A` (each clobbering the
      previous) — restored to the names the tokenizer framework reads;
    - every method was named `A_` (only the last survived) — restored to the
      standard `PreTrainedTokenizer` hook names their bodies imply;
    - method bodies referenced locals (`word`, `pairs`, `index`, `kv`, ...)
      that were never bound.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the JSON vocabulary and the BPE merges file."""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header; last entry is an empty trailer.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> already-computed BPE string
        self.cache = {}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single token, with memoization."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so merges can distinguish word-final units.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        # Join the BPE units with the continuation marker and drop the
        # trailing "</w>" (4 characters).
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace, then BPE-encode each chunk."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and strip the BPE continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*.

        Returns the pair of written file paths, or None if *save_directory*
        is not a directory (matching the original early-return behavior).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 276 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# Fix: the tokenizer class below uses `logger.error`, but the logger was bound
# to a clobbered placeholder name.
logger = logging.get_logger(__name__)
# Fix: these constants were all bound to the same placeholder name, so each
# assignment clobbered the previous one while the tokenizer class referenced
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / ..._SIZES, which did not
# exist.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum input length each pretrained checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class __magic_name__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BART tokenizer.

    Fixes applied:
    - base class restored to the imported `PreTrainedTokenizerFast` (the
      previous base name was undefined);
    - the five class attributes were all named `_UpperCAmelCase` (clobbering
      each other) — restored to the names the tokenizer framework reads;
    - method names restored (`mask_token` property/setter pair was broken:
      the property had a placeholder name, so `@mask_token.setter` raised);
    - bodies referenced `lowercase__`, which was never a parameter.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word and includes the
        # preceding space, hence lstrip=True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 702 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__(ProcessorMixin):
    """Processor wrapping a Pix2Struct image processor and a T5 tokenizer.

    Fixes applied:
    - base class restored to the imported `ProcessorMixin` (the previous base
      name was undefined);
    - class attributes restored to the names `ProcessorMixin` reads
      (`attributes`, `image_processor_class`, `tokenizer_class`) — previously
      all three were bound to the same placeholder;
    - `__call__` bodies referenced `text_encoding` / `encoding_image_processor`
      without ever binding them.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct never uses token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images and/or text for the model.

        Text-only inputs are tokenized directly; otherwise the image processor
        runs first and any tokenized text is merged in as decoder inputs.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and the rendered header text
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # Rename the tokenizer outputs to decoder-side names before merging.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 405 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """Build a SwinConfig matching a SimMIM-pretrained checkpoint name.

    Raises:
        ValueError: if *model_name* contains neither "base" nor "large".

    Fix: the parameter and every local had been renamed to placeholders, so
    `model_name`, `window_size`, `embed_dim`, etc. were unbound.
    """
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    """Map an original SimMIM state-dict key to the HF Swin naming scheme.

    Decoder keys are kept verbatim; all other keys get a "swin." prefix after
    the substring substitutions below.

    Fix: the parameter was a placeholder while the body read/wrote `name`.
    """
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite an original SimMIM state dict into HF Swin layout.

    Fused qkv weights/biases are split into separate query/key/value tensors
    sized from the target *model*; attention masks are dropped; everything
    else is renamed via `rename_key`.

    Fix: the loop locals and the split-tensor destination keys were lost in
    the scrambled source (slices were assigned to throwaway locals instead of
    into the state dict).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            # Relative-position attention masks are recomputed by the HF model.
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Convert an original SimMIM Swin checkpoint, sanity-check it on a COCO
    image, and optionally save/push the result.

    Fixes: locals were clobbered to placeholders, and the sanity check called
    `.keys()` on `model(**inputs).logits` (a tensor, which has no `.keys()`);
    the ModelOutput itself is inspected instead.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to placeholder names, so
    # `parser.add_argument(...)` and `args.*` raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 47 |
import string
def decrypt(message: str) -> None:
    """Print the Caesar-cipher decryption of *message* under all 26 keys.

    Non-uppercase characters pass through unchanged; one line is printed per
    key. Fix: the function and its locals were renamed to placeholders, so
    `message`, `translated` and `num` were all unbound.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main() -> None:
    """Prompt for a ciphertext and print all 26 candidate decryptions.

    Fix: this def previously shadowed the decrypt function (both shared one
    placeholder name) and the __main__ guard called an undefined `main`.
    """
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 492 | 0 |
# Demo DAG: node -> list of its outgoing neighbours. Fix: both module
# constants were bound to the same placeholder, so `edges`/`vertices` below
# were unbound.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Recursive depth-first ordering of the module-level `edges` graph.

    Appends finished nodes to *sort* (children before parents, matching the
    original algorithm) and restarts from any still-unvisited vertex until
    everything is covered.

    Fix: the original def had three parameters all named the same placeholder
    (a SyntaxError) and its body referenced `start`/`edges`/`vertices`.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # Fix: the result was bound to a placeholder while `print` read `sort`.
    sort = topological_sort("a", [], [])
    print(sort)
| 703 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds small BlipText configs/inputs for the TF model tests.

    Fixes applied:
    - class renamed from the placeholder (which collided with the test-case
      class below) to the name that class instantiates;
    - `__init__` had every parameter named the same placeholder (a
      SyntaxError) and stored values into throwaway locals instead of `self`;
    - method names restored to those the test framework calls.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with a contiguous mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Make each row's mask a prefix of ones followed by zeros.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class snake_case_(TFModelTesterMixin, unittest.TestCase):
    """TF BlipText model tests.

    Fixes applied:
    - mixin base restored to the imported `TFModelTesterMixin` (previous base
      name was undefined);
    - the four class attributes were all bound to one placeholder name;
      NOTE(review): the flag names below are reconstructed from the upstream
      test suite — confirm against the reference file;
    - test method names restored so the unittest runner discovers them, and
      `config_class` restored to the imported `BlipTextConfig`.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 253 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _snake_case ( __snake_case , unittest.TestCase ):
    """Tokenizer tests for BARThez (moussaKam/mbarthez), slow + fast variants.

    NOTE(review): an anonymisation pass rewrote local bindings to
    ``_SCREAMING_SNAKE_CASE`` and call arguments to ``_A``, so references
    such as ``tokenizer`` / ``vocab_keys`` / ``batch`` below are unresolved
    as written; comments document intended behaviour only. All four class
    attributes are assigned to the same name ``a`` -- only the last one
    survives at class-creation time.
    """

    a = BarthezTokenizer
    a = BarthezTokenizerFast
    a = True
    a = True

    def _lowerCAmelCase ( self : List[Any]):
        """Download the fast tokenizer once and save it (both formats) to tmpdir."""
        super().setUp()
        _SCREAMING_SNAKE_CASE : Union[str, Any] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A)
        _SCREAMING_SNAKE_CASE : Dict = tokenizer

    def _lowerCAmelCase ( self : Union[str, Any]):
        """``<pad>`` must map to id 1 and back."""
        _SCREAMING_SNAKE_CASE : Tuple = """<pad>"""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A)

    def _lowerCAmelCase ( self : List[Any]):
        """Vocab sanity: special tokens at expected positions; size 101122."""
        _SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , """<s>""")
        self.assertEqual(vocab_keys[1] , """<pad>""")
        self.assertEqual(vocab_keys[-1] , """<mask>""")
        self.assertEqual(len(_A) , 1_0_1_1_2_2)

    def _lowerCAmelCase ( self : Dict):
        """``vocab_size`` must agree with the vocabulary dump above."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2)

    @require_torch
    def _lowerCAmelCase ( self : List[str]):
        """Batched encoding returns (2, 6) padded PyTorch tensors with known ids."""
        _SCREAMING_SNAKE_CASE : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        _SCREAMING_SNAKE_CASE : List[Any] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        _SCREAMING_SNAKE_CASE : int = self.tokenizer(
            _A , max_length=len(_A) , padding=_A , truncation=_A , return_tensors="""pt""")
        self.assertIsInstance(_A , _A)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        _SCREAMING_SNAKE_CASE : List[Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(_A , _A)

    def _lowerCAmelCase ( self : int):
        """Slow and fast tokenizers must agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return
        _SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
        _SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE : Tuple = """I was born in 92000, and this is falsé."""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(_A)
        _SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.tokenize(_A)
        self.assertListEqual(_A , _A)
        _SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_A , add_special_tokens=_A)
        _SCREAMING_SNAKE_CASE : int = rust_tokenizer.encode(_A , add_special_tokens=_A)
        self.assertListEqual(_A , _A)
        _SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(_A)
        _SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(_A)
        self.assertListEqual(_A , _A)

    @slow
    def _lowerCAmelCase ( self : List[Any]):
        """Full integration check against a pinned revision of mbarthez."""
        # Expected ids/masks for the two French sequences below; kept on one
        # line on purpose (the matching "# fmt: off" marker was lost).
        _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        _SCREAMING_SNAKE_CASE : Dict = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_A , )
| 338 | """simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    """Helper that builds ResNet configs and dummy inputs for the TF tests.

    NOTE(review): the anonymisation pass collapsed every parameter name to
    ``_A`` (duplicated parameter names are a SyntaxError as written) and
    rewrote attribute assignments into throwaway ``_SCREAMING_SNAKE_CASE``
    locals, so names like ``parent`` / ``batch_size`` / ``config`` below are
    unresolved. Comments document intended behaviour only.
    """

    def __init__( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any]=3 , _A : Optional[int]=3_2 , _A : int=3 , _A : Optional[int]=1_0 , _A : str=[1_0, 2_0, 3_0, 4_0] , _A : int=[1, 1, 2, 1] , _A : int=True , _A : Optional[Any]=True , _A : Optional[Any]="relu" , _A : str=3 , _A : str=None , ):
        """Record test hyperparameters; num_stages is derived from depths."""
        _SCREAMING_SNAKE_CASE : Any = parent
        _SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
        _SCREAMING_SNAKE_CASE : List[str] = image_size
        _SCREAMING_SNAKE_CASE : str = num_channels
        _SCREAMING_SNAKE_CASE : Any = embeddings_size
        _SCREAMING_SNAKE_CASE : int = hidden_sizes
        _SCREAMING_SNAKE_CASE : Optional[Any] = depths
        _SCREAMING_SNAKE_CASE : Optional[Any] = is_training
        _SCREAMING_SNAKE_CASE : List[str] = use_labels
        _SCREAMING_SNAKE_CASE : List[Any] = hidden_act
        _SCREAMING_SNAKE_CASE : Dict = num_labels
        _SCREAMING_SNAKE_CASE : Tuple = scope
        _SCREAMING_SNAKE_CASE : Dict = len(_A)

    def _lowerCAmelCase ( self : Tuple):
        """Build random pixel values (and labels when use_labels) plus a config."""
        _SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _SCREAMING_SNAKE_CASE : int = None
        if self.use_labels:
            _SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_labels)
        _SCREAMING_SNAKE_CASE : Tuple = self.get_config()
        return config, pixel_values, labels

    def _lowerCAmelCase ( self : List[str]):
        """Return a ResNetConfig built from the recorded hyperparameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Optional[Any]):
        """Forward the base model; final feature map is downsampled 32x."""
        _SCREAMING_SNAKE_CASE : List[str] = TFResNetModel(config=_A)
        _SCREAMING_SNAKE_CASE : List[Any] = model(_A)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def _lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : Optional[Any] , _A : int):
        """Forward the classification head; logits are (batch, num_labels)."""
        _SCREAMING_SNAKE_CASE : Tuple = self.num_labels
        _SCREAMING_SNAKE_CASE : List[str] = TFResNetForImageClassification(_A)
        _SCREAMING_SNAKE_CASE : Dict = model(_A , labels=_A)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def _lowerCAmelCase ( self : List[Any]):
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        _SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = config_and_inputs
        _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-suite tests for TF ResNet (model + image-classification head).

    NOTE(review): as in the tester above, locals were anonymised to
    ``_SCREAMING_SNAKE_CASE`` / ``_A``; several references below
    (``config``, ``inputs_dict``, ``model``, ...) are unresolved as written.
    The five ``a = False`` lines all rebind one class attribute.
    """

    a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    a = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    a = False
    a = False
    a = False
    a = False
    a = False

    def _lowerCAmelCase ( self : List[str]):
        """setUp: create the model tester and a text-free ConfigTester."""
        _SCREAMING_SNAKE_CASE : Optional[int] = TFResNetModelTester(self)
        _SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_A , has_text_modality=_A)

    def _lowerCAmelCase ( self : List[str]):
        """Run the individual config checks (ResNet skips run_common_tests)."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowerCAmelCase ( self : Optional[int]):
        """No common config properties to check for ResNet."""
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""")
    def _lowerCAmelCase ( self : Any):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""")
    def _lowerCAmelCase ( self : Any):
        pass

    def _lowerCAmelCase ( self : Tuple):
        """The call signature of every model class must start with pixel_values."""
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _SCREAMING_SNAKE_CASE : Any = model_class(_A)
            _SCREAMING_SNAKE_CASE : Any = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
            _SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _A)

    def _lowerCAmelCase ( self : List[Any]):
        """Exercise the base model forward pass via the tester."""
        _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A)

    def _lowerCAmelCase ( self : List[str]):
        """hidden_states must have num_stages+1 entries for both layer types."""
        def check_hidden_states_output(_A : int , _A : Optional[int] , _A : Dict):
            _SCREAMING_SNAKE_CASE : Tuple = model_class(_A)
            _SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(_A , _A))
            _SCREAMING_SNAKE_CASE : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(_A) , expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _SCREAMING_SNAKE_CASE : List[str] = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                _SCREAMING_SNAKE_CASE : Dict = layer_type
                _SCREAMING_SNAKE_CASE : List[Any] = True
                check_hidden_states_output(_A , _A , _A)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _SCREAMING_SNAKE_CASE : Optional[Any] = True
                check_hidden_states_output(_A , _A , _A)

    def _lowerCAmelCase ( self : Optional[int]):
        """Exercise the image-classification head via the tester."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A)

    @slow
    def _lowerCAmelCase ( self : int):
        """Smoke-test loading the first public checkpoint from the hub."""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = TFResNetModel.from_pretrained(_A)
            self.assertIsNotNone(_A)
def lowerCamelCase_() -> Optional[Any]:
    """Load and return the standard COCO cats fixture image.

    Returns:
        The ``PIL.Image.Image`` opened from the test fixtures directory.

    Fix: the original bound the opened image to a throwaway local and then
    returned the undefined name ``image`` (NameError at runtime); the image
    is now bound to the name that is returned.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase ):
    """Slow integration test: run the pretrained TF ResNet classifier end to end."""

    @cached_property
    def _lowerCAmelCase ( self : List[Any]):
        # Default image processor for the first public checkpoint; None when
        # the vision extras are not installed.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def _lowerCAmelCase ( self : Optional[int]):
        """Classify the COCO fixture image and pin the first three logits.

        NOTE(review): locals were anonymised -- ``inputs`` / ``outputs`` /
        ``expected_shape`` etc. are unresolved names as written.
        """
        _SCREAMING_SNAKE_CASE : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        _SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
        _SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
        _SCREAMING_SNAKE_CASE : List[str] = image_processor(images=_A , return_tensors="""tf""")
        # forward pass
        _SCREAMING_SNAKE_CASE : int = model(**_A)
        # verify the logits
        _SCREAMING_SNAKE_CASE : Dict = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , _A)
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4))
| 338 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Runtime-checked dependency names.
# NOTE(review): the list is bound to ``lowerCAmelCase`` but the loop below
# iterates ``pkgs_to_check_at_runtime`` -- presumably the original name
# before anonymisation; unresolved as written.
lowerCAmelCase : Dict =[
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

# Verify each pinned dependency at import time; optional packages
# (tokenizers, accelerate) are only version-checked when installed.
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def A__ ( __A , hint=None ):
    """Check that the installed version of dependency *__A* matches the pin.

    Args:
        __A: name of the dependency to check (a key of ``deps``).
        hint: optional extra text appended to the mismatch error message.

    Raises:
        Whatever ``require_version`` raises on a version mismatch, and
        ``KeyError`` if ``__A`` is not a pinned dependency.

    Fix: the original declared two parameters both named ``__A`` (a
    SyntaxError) and looked up the stale module-level loop variable ``pkg``
    instead of its own argument; the function now checks the dependency it
    was asked about and forwards the hint.
    """
    require_version(deps[__A] , hint )
| 15 | from __future__ import annotations
class __snake_case :
    """XOR stream cipher (symmetric: encrypting twice with one key is identity).

    NOTE(review): this class is badly mangled by the anonymisation pass and
    does not run as written:
    * all six methods share the name ``_SCREAMING_SNAKE_CASE`` -- only the
      last definition (the file-decrypt variant) survives on the class;
    * every method declares two parameters named ``_UpperCamelCase``, which
      is a SyntaxError in Python;
    * ``__init__`` binds the key to a throwaway local and references the
      undefined name ``key`` instead of storing ``self.__key``, so the
      ``key or self.__key or 1`` fallbacks raise AttributeError;
    * bodies reference ``key`` / ``content`` / ``ch`` that no longer exist.
    Comments below document the *intended* behaviour only. Also note
    ``key %= 255`` (not 256): key 255 degenerates to 0, i.e. a no-op cipher
    -- presumably a latent bug in the source algorithm; confirm.
    """

    def __init__( self : Tuple , _UpperCamelCase : int = 0) -> None:
        # Intended: remember the default key for later encrypt/decrypt calls.
        _lowerCamelCase : Union[str, Any] = key

    def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
        """Intended: XOR-encrypt ``content`` char by char, returning a char list."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : Union[str, Any] = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(_UpperCamelCase) ^ key) for ch in content]

    def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : str , _UpperCamelCase : int) ->list[str]:
        """Intended: XOR-decrypt ``content`` (identical to encrypt by symmetry)."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : Optional[int] = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(_UpperCamelCase) ^ key) for ch in content]

    def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
        """Intended: XOR-encrypt ``content`` into a single string."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : int = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        _lowerCamelCase : Any = """"""
        for ch in content:
            ans += chr(ord(_UpperCamelCase) ^ key)
        return ans

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->str:
        """Intended: XOR-decrypt ``content`` into a single string."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        _lowerCamelCase : int = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        _lowerCamelCase : Optional[Any] = """"""
        for ch in content:
            ans += chr(ord(_UpperCamelCase) ^ key)
        return ans

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int = 0) ->bool:
        """Intended: encrypt a file line by line into ``encrypt.out``; bool status."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        try:
            with open(_UpperCamelCase) as fin, open("""encrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(_UpperCamelCase , _UpperCamelCase))
        except OSError:
            return False
        return True

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : int) ->bool:
        """Intended: decrypt a file line by line into ``decrypt.out``; bool status."""
        assert isinstance(_UpperCamelCase , _UpperCamelCase) and isinstance(_UpperCamelCase , _UpperCamelCase)
        try:
            with open(_UpperCamelCase) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(_UpperCamelCase , _UpperCamelCase))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 15 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
    """BertJapaneseTokenizer tests for the word-level tokenizers.

    Covers the default (MeCab) pipeline plus the optional Sudachi and
    Juman++ word tokenizers, the WordPiece subword tokenizer, pickling,
    and special-token sequence building.
    NOTE(review): the anonymisation pass rewrote every local binding to
    ``SCREAMING_SNAKE_CASE``, so later references to the intended names
    (``vocab_tokens``, ``tokenizer``, ``text``, ``vocab``, ...) are
    unresolved as written -- flagged once here instead of per line. The
    three ``lowerCamelCase__`` class attributes also rebind one name.
    """

    lowerCamelCase__ = BertJapaneseTokenizer
    lowerCamelCase__ = False
    lowerCamelCase__ = True

    def _snake_case ( self : Dict ):
        """setUp: write a tiny WordPiece vocabulary into the temp test dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _snake_case ( self : Tuple , __lowerCamelCase : List[Any] ):
        """Sample input and its expected whitespace-joined tokenization."""
        SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def _snake_case ( self : str , __lowerCamelCase : Any ):
        """Encode/decode the sample pair without special tokens."""
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
        return text, ids

    def _snake_case ( self : Optional[int] ):
        pass # TODO add if relevant

    def _snake_case ( self : List[Any] ):
        pass # TODO add if relevant

    def _snake_case ( self : List[Any] ):
        pass # TODO add if relevant

    def _snake_case ( self : Union[str, Any] ):
        """Default pipeline: word split + WordPiece, with stable ids."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def _snake_case ( self : Tuple ):
        """MeCab word tokenizer survives a pickle round trip unchanged."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(__lowerCamelCase , "wb" ) as handle:
            pickle.dump(__lowerCamelCase , __lowerCamelCase )
        with open(__lowerCamelCase , "rb" ) as handle:
            SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )

    def _snake_case ( self : Optional[Any] ):
        """MeCab with the ipadic dictionary."""
        SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self : int ):
        """MeCab with unidic_lite; skipped silently when not installed."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self : Optional[Any] ):
        """MeCab with unidic; skipped silently when not installed."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self : Any ):
        """MeCab with lowercasing enabled (iPhone -> iphone)."""
        SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=__lowerCamelCase , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self : Union[str, Any] ):
        """MeCab with an explicit jumandic option; skipped if dict missing."""
        try:
            SCREAMING_SNAKE_CASE = MecabTokenizer(
                do_lower_case=__lowerCamelCase , normalize_text=__lowerCamelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def _snake_case ( self : Union[str, Any] ):
        """MeCab without text normalization keeps the ideographic space."""
        SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=__lowerCamelCase , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def _snake_case ( self : List[Any] ):
        """Sudachi word tokenizer survives a pickle round trip unchanged."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(__lowerCamelCase , "wb" ) as handle:
            pickle.dump(__lowerCamelCase , __lowerCamelCase )
        with open(__lowerCamelCase , "rb" ) as handle:
            SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )

    @require_sudachi
    def _snake_case ( self : List[str] ):
        """Sudachi keeps whitespace tokens by default."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self : int ):
        """Sudachi split mode A: shortest units."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def _snake_case ( self : Tuple ):
        """Sudachi split mode B: middle units."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def _snake_case ( self : int ):
        """Sudachi split mode C: longest units."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def _snake_case ( self : Optional[int] ):
        """Sudachi with lowercasing enabled."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=__lowerCamelCase , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self : Optional[int] ):
        """Sudachi without normalization keeps the ideographic space."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=__lowerCamelCase , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self : List[str] ):
        """Sudachi with whitespace trimming drops the space tokens."""
        SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=__lowerCamelCase , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def _snake_case ( self : Tuple ):
        """Juman++ word tokenizer survives a pickle round trip unchanged."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(__lowerCamelCase , "wb" ) as handle:
            pickle.dump(__lowerCamelCase , __lowerCamelCase )
        with open(__lowerCamelCase , "rb" ) as handle:
            SCREAMING_SNAKE_CASE = pickle.load(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(__lowerCamelCase )
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )

    @require_jumanpp
    def _snake_case ( self : Optional[int] ):
        """Juman++ emits ideographic-space tokens by default."""
        SCREAMING_SNAKE_CASE = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self : List[str] ):
        """Juman++ with lowercasing enabled."""
        SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self : Optional[Any] ):
        """Juman++ without normalization splits half-width katakana."""
        SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self : Dict ):
        """Juman++ with whitespace trimming drops the space tokens."""
        SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def _snake_case ( self : List[Any] ):
        """Juman++ keeps an ASCII-art emoticon as a single token."""
        SCREAMING_SNAKE_CASE = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def _snake_case ( self : int ):
        """WordpieceTokenizer: greedy longest-match with [UNK] fallback."""
        SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE = i
        SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def _snake_case ( self : Any ):
        """SentencePiece subword tokenizer of the auto-jumanpp checkpoint."""
        SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
        SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(__lowerCamelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(__lowerCamelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def _snake_case ( self : Optional[Any] ):
        """Special-token layout: [CLS] a [SEP] and [CLS] a [SEP] b [SEP]."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        SCREAMING_SNAKE_CASE = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
    """BertJapaneseTokenizer tests for the character-level subword tokenizer.

    NOTE(review): as in the class above, locals were anonymised to
    ``SCREAMING_SNAKE_CASE`` so names like ``vocab_tokens`` / ``tokenizer``
    are unresolved as written; the two ``lowerCamelCase__`` attributes
    rebind one name.
    """

    lowerCamelCase__ = BertJapaneseTokenizer
    lowerCamelCase__ = False

    def _snake_case ( self : Optional[Any] ):
        """setUp: write a tiny single-character vocabulary to the temp dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _snake_case ( self : str , **__lowerCamelCase : Optional[int] ):
        """Build a character-subword tokenizer from the temp vocabulary."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **__lowerCamelCase )

    def _snake_case ( self : Dict , __lowerCamelCase : Optional[int] ):
        """Sample input and its expected character-joined tokenization."""
        SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def _snake_case ( self : Tuple ):
        pass # TODO add if relevant

    def _snake_case ( self : Tuple ):
        pass # TODO add if relevant

    def _snake_case ( self : Any ):
        pass # TODO add if relevant

    def _snake_case ( self : Tuple ):
        """Full pipeline with character subwords, with stable ids."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        SCREAMING_SNAKE_CASE = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            __lowerCamelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def _snake_case ( self : Optional[int] ):
        """CharacterTokenizer: per-character split with [UNK] fallback."""
        SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE = {}
        for i, token in enumerate(__lowerCamelCase ):
            SCREAMING_SNAKE_CASE = i
        SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def _snake_case ( self : Tuple ):
        """Special-token layout for the char checkpoint: [CLS]/[SEP] framing."""
        SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        SCREAMING_SNAKE_CASE = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """AutoTokenizer must resolve a BertJapanese checkpoint to BertJapaneseTokenizer."""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        # The obfuscated version compared the tokenizer against itself via an
        # unbound name; the intended check is the concrete tokenizer class.
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """Loading a checkpoint with a mismatched tokenizer class must log a warning."""

    def test_tokenizer_mismatch_warning(self):
        # BertTokenizer loading a BertJapanese checkpoint -> warning
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        # BertJapaneseTokenizer loading a plain BERT checkpoint -> warning
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Shard layout for the distributed-split check; read by the generator and by
# main() (line `NUM_SHARDS * NUM_ITEMS_PER_SHARD`). The obfuscated version
# bound both values to the same throwaway name, leaving these undefined.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """Raised by main() when the per-rank dataset split has the wrong size.

    Named ``FailedTestError`` because that is the name raised below; the
    obfuscated version also inherited from an undefined base.
    """
def a(shards):
    """Yield NUM_ITEMS_PER_SHARD example dicts for every shard name in ``shards``.

    The parameter must be called ``shards`` because main() passes
    ``gen_kwargs={"shards": [...]}`` by keyword; the obfuscated version named
    it ``a`` and iterated an unbound ``shards``.
    """
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """Check that split_dataset_by_node gives this rank its expected share.

    Renamed to ``main`` because the ``__main__`` guard below calls ``main()``;
    the obfuscated version also discarded every local into one throwaway name.
    Raises FailedTestError when the local dataloader size is wrong.
    """
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    # `a` is the shard generator defined above
    ds = IterableDataset.from_generator(a, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # ranks lower than the remainder get one extra item
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main() | 201 | 0 |
from __future__ import annotations
def __UpperCAmelCase(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the mass-action law n * p = n_i**2 for the single missing quantity.

    Exactly one of the three concentrations must be 0 (the unknown); the other
    two must be non-negative. Returns ("<name>", value) for the solved unknown.
    Raises ValueError on bad input. The original signature declared all three
    parameters with the same name (a SyntaxError); the body already used the
    real names restored here.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative in a semiconductor')
    elif hole_conc < 0:
        raise ValueError('Hole concentration cannot be negative in a semiconductor')
    elif intrinsic_conc < 0:
        raise ValueError(
            'Intrinsic concentration cannot be negative in a semiconductor')
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # unreachable: exactly one zero is guaranteed above
        return (-1, -1)


# Public, star-importable alias for the obfuscated name.
carrier_concentration = __UpperCAmelCase
if __name__ == "__main__":
import doctest
doctest.testmod()
| 607 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Unit test for tf_top_k_top_p_filtering (no model weights needed)."""

    # tests whether the top_k_top_p function behaves as expected
    def test_top_k_top_p_filtering(self):
        # NOTE(review): dtypes restored from the corrupted `tf.floataa`/`tf.intaa`
        # to tf.float32/tf.int32; locals rebound to real names (the obfuscated
        # version read unbound `output`/`a`).
        logits = tf.convert_to_tensor(
            [
                [
                    8.2_220_991,  # 3rd highest value; idx. 0
                    -0.5_620_044,
                    5.23_229_752,
                    4.0_386_393,
                    -6.8_798_378,
                    -0.54_785_802,
                    -3.2_012_153,
                    2.92_777_176,
                    1.88_171_953,
                    7.35_341_276,  # 5th highest value; idx. 9
                    8.43_207_833,  # 2nd highest value; idx. 10
                    -9.85_711_836,
                    -5.96_209_236,
                    -1.13_039_161,
                    -7.1_115_294,
                    -0.8_369_633,
                    -5.3_186_408,
                    7.06_427_407,
                    0.81_369_344,
                    -0.82_023_817,
                    -5.9_179_796,
                    0.58_813_443,
                    -6.99_778_438,
                    4.71_551_189,
                    -0.18_771_637,
                    7.44_020_759,  # 4th highest value; idx. 25
                    9.38_450_987,  # 1st highest value; idx. 26
                    2.12_662_941,
                    -9.32_562_038,
                    2.35_652_522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58_425_518,
                    4.53_139_238,
                    -5.57_510_464,
                    -6.28_030_699,
                    -7.19_529_503,
                    -4.02_122_551,
                    1.39_337_037,
                    -6.06_707_057,
                    1.59_480_517,
                    -9.643_119,
                    0.03_907_799,
                    0.67_231_762,
                    -8.88_206_726,
                    6.27_115_922,  # 4th highest value; idx. 13
                    2.28_520_723,
                    4.82_767_506,
                    4.30_421_368,
                    8.8_275_313,  # 2nd highest value; idx. 17
                    5.44_029_958,  # 5th highest value; idx. 18
                    -4.4_735_794,
                    7.38_579_536,  # 3rd highest value; idx. 20
                    -2.91_051_663,
                    2.61_946_077,
                    -2.5_674_762,
                    -9.48_959_302,
                    -4.02_922_645,
                    -1.35_416_918,
                    9.67_702_323,  # 1st highest value; idx. 27
                    -5.89_478_553,
                    1.85_370_467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    """TF-side generation integration tests (mixin base restored from the
    file's own import; the obfuscated version inherited an undefined name and
    gave every test method the same name, so only the last one survived)."""

    # setup configuration mapping consumed by GenerationIntegrationTestsMixin;
    # model-class values reference the names this file imports above.
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            """Wraps the model with a fixed-input-length serving signature."""

            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            """Wraps the model with a fixed-batch-size serving signature."""

            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                """Keras layer that tokenizes, generates, and detokenizes."""

                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" kwarg must generate
        # the same output (the kwarg is filtered before the encoder call).
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 607 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__SCREAMING_SNAKE_CASE :List[str] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase_ ( __lowercase : Optional[Any] , __lowercase : Any , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : Optional[Any]=None , __lowercase : Optional[int]=None , __lowercase : Optional[Any]=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and checks decoder cache parity.

    Renamed to the name the test class below already instantiates; the
    obfuscated ``__init__`` declared every parameter as ``snake_case_``
    (a SyntaxError) and never stored them on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random ids clipped above special tokens, EOS-terminated, plus config."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # append eos (=2) so every row is a finished sequence
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached incremental decoding must match the full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache-parity check but with an explicit (padded) attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the mask out to the cache length with zeros
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Shape/behavior checks for the Flax Blenderbot LM head and token shifting.

    The obfuscated class stored ``vocab_size`` under a placeholder attribute
    while the methods read ``self.vocab_size`` (AttributeError), and used the
    nonexistent ``np.intaa``/``np.floataa`` dtypes.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # encoder and decoder sequences of different lengths
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # one pad token is consumed by the shift
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Common + generation tests for the Flax Blenderbot models.

    The obfuscated class listed the same (undefined) base twice — a TypeError
    at class creation — and rebound one placeholder attribute three times.
    Bases and attribute names are restored from this file's own imports.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 236 |
a_ : str = 6_55_21
def __a ( __UpperCAmelCase ):
a__ = 1
a__ = 0
for plain_chr in plain_text:
a__ = (a + ord(__UpperCAmelCase )) % MOD_ADLER
a__ = (b + a) % MOD_ADLER
return (b << 16) | a
| 194 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# CI Hub identities and endpoints used by the fixtures below. The obfuscated
# version bound every constant to the same placeholder name and then read the
# real names (undefined). "(unknown)" in the HFH template was a corrupted
# "{filename}" placeholder — restored.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub file downloads at the CI Hub URL template.

    pytest injects fixtures by parameter name, so the parameter must be
    ``monkeypatch``; the obfuscated version renamed it away (NameError) and
    every fixture in this file shared the name ``A``, so only the last one
    was ever registered.
    """
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )
@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point the `datasets` config at the CI Hub endpoint/URL for this test."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Redirect HfFolder's token path to the CI token file.

    The obfuscated version passed the (misnamed) monkeypatch object itself as
    the new value; the intended value is CI_HUB_TOKEN_PATH defined above.
    """
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Save the CI user token for the duration of a test, then remove it.

    NOTE(review): the obfuscated signature had two identically-named params
    (SyntaxError); `ci_hub_config` + `ci_hub_token_path` reconstructed as the
    two fixture dependencies — confirm against upstream.
    """
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope='session')
def hf_api():
    """Session-wide HfApi client bound to the CI Hub endpoint."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope='session')
def hf_token(hf_api):
    """Install the CI user token for the session and restore the previous one.

    The obfuscated version saved/yielded the hf_api fixture object instead of
    CI_HUB_USER_TOKEN and never restored the captured previous token.
    """
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo with the CI token."""

    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type='dataset')

    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    """Return a context manager that deletes the given repo on exit."""

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope='session')
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    """Create a private dataset repo with one text file; best-effort delete after.

    NOTE(review): parameter names reconstructed from use (api client, token,
    uploaded file fixture) — confirm the third fixture name against upstream.
    """
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo='data/text_data.txt',
        repo_id=repo_id,
        repo_type='dataset',
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset')
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Per-test view of the session repo, with CI Hub config/URL patches active."""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session')
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private dataset repo holding a zipped text archive.

    NOTE(review): third fixture name reconstructed — confirm against upstream.
    """
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type='dataset', private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo='data.zip',
        repo_id=repo_id,
        repo_type='dataset',
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='dataset')
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def A ( hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """Per-test fixture: return the session-scoped private zipped-txt repo id while
    also activating the CI hub config fixtures.

    NOTE(review): the exported signature repeated ``snake_case__`` three times
    (a SyntaxError) and the body returned an undefined name; parameter names
    restored from the upstream datasets ``fixtures/hub.py`` — confirm.
    """
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def A ( hf_api , hf_token , zipped_img_file ):
    """Session fixture: create a private dataset repo on the CI hub holding a
    zipped image archive as ``data.zip``, yield the repo id, and delete the repo
    afterwards (best effort).

    NOTE(review): the exported signature repeated ``snake_case__`` for all three
    parameters (a SyntaxError). pytest injects fixtures by parameter name; the
    third fixture's upstream name could not be recovered from this chunk —
    ``zipped_img_file`` is a placeholder to confirm against the conftest.
    """
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    # NOTE(review): presumably private=True upstream — the mangled source passed
    # an undefined name here.
    hf_api.create_repo(repo_id , token=hf_token , repo_type='dataset' , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zipped_img_file ) , path_in_repo='data.zip' , repo_id=repo_id , repo_type='dataset' , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type='dataset' )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def A ( hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """Per-test fixture: return the session-scoped private zipped-image repo id
    while also activating the CI hub config fixtures.

    NOTE(review): the exported signature repeated ``snake_case__`` three times
    (a SyntaxError) and the body returned an undefined name; parameter names
    restored from the upstream datasets ``fixtures/hub.py`` — confirm.
    """
    return hf_private_dataset_repo_zipped_img_data_
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( repo_id , path , revision ):
    """Check that hf_hub_url builds the expected ``resolve`` URL: the path is
    URL-quoted and a missing revision falls back to ``main``.

    FIX: the exported signature repeated ``snake_case__`` three times (a
    SyntaxError); parameter names restored to match the parametrize ids.
    """
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
| 676 | 0 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowerCAmelCase_ ( k_size , sigma ):
    """Return a ``k_size`` x ``k_size`` Gaussian kernel centred on the middle cell.

    FIX: the exported signature used the same name ``snake_case_`` for both
    parameters (a SyntaxError); names restored from how the body uses them.

    Note: the normalisation factor is ``1 / (2*pi*sigma)`` — kept as in the
    original (a textbook 2-D Gaussian would use ``sigma**2``), since downstream
    results depend on it.
    """
    center = k_size // 2
    # coordinate grids running from -center .. k_size-center-1 in each axis
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def lowerCAmelCase_ ( image , k_size , sigma ):
    """Gaussian-blur ``image`` with a ``k_size`` x ``k_size`` kernel via im2col.

    Returns a ``uint8`` array of shape ``(H - k_size + 1, W - k_size + 1)``
    (valid convolution, no padding).

    FIX: the exported signature repeated ``snake_case_`` for all three
    parameters (a SyntaxError) and the body called the nonexistent names
    ``gen_gaussian_kernel`` and the import ``uinta``; the kernel generator is
    now a local helper and the dtype is given as the string ``"uint8"``.
    """

    def _gen_gaussian_kernel(size, sig):
        # k x k Gaussian weights centred on the middle cell (same formula as the
        # module-level kernel function, kept local so this block is self-contained)
        center = size // 2
        x, y = mgrid[0 - center : size - center, 0 - center : size - center]
        return 1 / (2 * pi * sig) * exp(-(square(x) + square(y)) / (2 * square(sig)))

    height, width = image.shape[0], image.shape[1]
    # dst image height and width (valid region only)
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into one row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        image_array[row, :] = ravel(image[i : i + k_size, j : j + k_size])
        row += 1

    # flatten the kernel to shape (k*k,) and apply it to every window at once
    gaussian_kernel = ravel(_gen_gaussian_kernel(k_size, sigma))
    dst = dot(image_array, gaussian_kernel).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask sizes.
    # FIX: the filter in this module is named ``lowerCAmelCase_`` (there is no
    # ``gaussian_filter``), and both ``imshow`` calls previously displayed the
    # same mangled variable ``gaussianaxa``.
    gaussian3x3 = lowerCAmelCase_(gray, 3, sigma=1)
    gaussian5x5 = lowerCAmelCase_(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 307 |
import string
import numpy
def lowerCAmelCase_ ( a , b ):
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm).

    FIX: the exported signature used the same name ``snake_case_`` twice (a
    SyntaxError) and recursed through the nonexistent name
    ``greatest_common_divisor``; the recursion now calls this function itself.
    """
    return b if a == 0 else lowerCAmelCase_(b % a , a )
class lowercase :
    """36-character Hill cipher over A-Z plus 0-9.

    NOTE(review): heavy export-tooling damage throughout this class —
    * the three class attributes are all bound to the single name ``_a`` (each
      rebinds the previous; presumably upstream they were ``key_string``,
      ``modulus`` and ``to_int`` — the methods below read those ``self.`` names);
    * the first ``numpy.vectorize`` lambda takes ``UpperCamelCase__`` but its
      body reads ``x`` (NameError when called), and the second vectorize
      references the undefined name ``UpperCamelCase__``;
    * method-local results are assigned to the throwaway name ``_A`` while later
      lines read the pre-mangling names (``encrypt_key``, ``det``, ``req_l``,
      ``chars``, ``batch_encrypted`` …).
    Restore from the upstream hill_cipher implementation before use.
    """

    _a = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    _a = numpy.vectorize(lambda UpperCamelCase__ : x % 3_6 )

    _a = numpy.vectorize(UpperCamelCase__ )

    def __init__( self , _a ) -> None:
        """Store the mod-36 reduced encryption key and validate its determinant."""
        _A : Dict = self.modulus(_a )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        # block size = key order (n for an n x n key matrix)
        _A : str = encrypt_key.shape[0]

    def a__ ( self , _a ) -> int:
        """Map a character of the 36-char alphabet to its numeric index."""
        return self.key_string.index(_a )

    def a__ ( self , _a ) -> str:
        """Map a (rounded) numeric index back to its alphabet character.

        NOTE(review): every method in this class is named ``a__`` — each def
        rebinds the previous one, so only the last survives on the class.
        """
        return self.key_string[round(_a )]

    def a__ ( self ) -> None:
        """Raise ValueError unless the key determinant (mod 36) is coprime to 36."""
        _A : Union[str, Any] = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            _A : Union[str, Any] = det % len(self.key_string )

        _A : Optional[Any] = len(self.key_string )
        if greatest_common_divisor(_a , len(self.key_string ) ) != 1:
            _A : List[Any] = (
                F'''determinant modular {req_l} of encryption key({det}) '''
                F'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(_a )

    def a__ ( self , _a ) -> str:
        """Keep only in-alphabet chars (upper-cased) and pad with the last char
        to a multiple of the block size.

        NOTE(review): ``chars[-1]`` raises IndexError on input with no valid
        characters — upstream has the same weakness.
        """
        _A : List[str] = [char for char in text.upper() if char in self.key_string]

        _A : Tuple = chars[-1]
        while len(_a ) % self.break_key != 0:
            chars.append(_a )

        return "".join(_a )

    def a__ ( self , _a ) -> str:
        """Encrypt ``text`` block-by-block: key_matrix @ block (mod 36)."""
        _A : int = self.process_text(text.upper() )
        _A : int = """"""

        for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
            _A : Dict = text[i : i + self.break_key]
            _A : int = [self.replace_letters(_a ) for char in batch]
            _A : str = numpy.array([vec] ).T
            _A : Optional[Any] = self.modulus(self.encrypt_key.dot(_a ) ).T.tolist()[
                0
            ]
            _A : Tuple = """""".join(
                self.replace_digits(_a ) for num in batch_encrypted )
            encrypted += encrypted_batch

        return encrypted

    def a__ ( self ) -> numpy.ndarray:
        """Build the decryption key: det_inv * det * inv(key), reduced mod 36."""
        _A : List[str] = round(numpy.linalg.det(self.encrypt_key ) )

        if det < 0:
            _A : str = det % len(self.key_string )
        _A : List[str] = None
        # modular inverse of the determinant, found by brute force over 0..35
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                _A : Dict = i
                break

        _A : Union[str, Any] = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )

        return self.to_int(self.modulus(_a ) )

    def a__ ( self , _a ) -> str:
        """Decrypt ``text`` block-by-block with the derived decryption key."""
        _A : Optional[Any] = self.make_decrypt_key()
        _A : Dict = self.process_text(text.upper() )
        _A : Optional[int] = """"""

        for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
            _A : Optional[int] = text[i : i + self.break_key]
            _A : List[Any] = [self.replace_letters(_a ) for char in batch]
            _A : Tuple = numpy.array([vec] ).T
            _A : List[str] = self.modulus(decrypt_key.dot(_a ) ).T.tolist()[0]
            _A : str = """""".join(
                self.replace_digits(_a ) for num in batch_decrypted )
            decrypted += decrypted_batch

        return decrypted
def lowerCAmelCase_ ( ):
    """Interactive driver: read a key matrix from stdin, then encrypt or decrypt
    user-supplied text with the Hill cipher class above.

    NOTE(review): export-tooling damage — every local result is assigned to the
    throwaway name ``_A`` while later lines read the pre-mangling names
    (``hill_matrix``, ``hc``, ``option``), and ``HillCipher`` does not exist in
    this module (the class above is named ``lowercase``). Restore names before
    running.
    """
    _A : Union[str, Any] = int(input("""Enter the order of the encryption key: """ ) )
    _A : Optional[int] = []
    print("""Enter each row of the encryption key with space separated integers""" )
    for _ in range(snake_case_ ):
        _A : Any = [int(snake_case_ ) for x in input().split()]
        hill_matrix.append(snake_case_ )

    _A : Dict = HillCipher(numpy.array(snake_case_ ) )

    print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
    _A : str = input("""\n1. Encrypt\n2. Decrypt\n""" )
    if option == "1":
        _A : str = input("""What text would you like to encrypt?: """ )
        print("""Your encrypted text is:""" )
        print(hc.encrypt(snake_case_ ) )
    elif option == "2":
        _A : Tuple = input("""What text would you like to decrypt?: """ )
        print("""Your decrypted text is:""" )
        print(hc.decrypt(snake_case_ ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # FIX: the interactive entry point in this module is named
    # ``lowerCAmelCase_`` — there is no ``main``.
    lowerCAmelCase_()
| 307 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __UpperCamelCase :
    """Builds tiny MBart configs and inputs for the TF model tests below.

    NOTE(review): export-tooling damage in this class —
    * ``__init__`` repeats the parameter name ``_UpperCamelCase`` (SyntaxError);
    * every ``self.<attr> = ...`` assignment became a throwaway local
      ``snake_case_``, while the other methods still read ``self.batch_size``
      etc.;
    * the three class attributes are all bound to the single name ``lowercase``
      (presumably upstream: ``config_cls`` / ``config_updates`` / ``hidden_act``);
    * the call to ``prepare_mbart_inputs_dict`` below cannot resolve — that
      helper was renamed ``UpperCAmelCase`` in this file.
    Restore from the upstream TF MBart test module before use.
    """

    lowercase : Optional[Any] = MBartConfig
    lowercase : List[str] = {}
    lowercase : Dict = 'gelu'

    def __init__( self :Any ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :int=1_3 ,_UpperCamelCase :List[Any]=7 ,_UpperCamelCase :List[Any]=True ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :Any=9_9 ,_UpperCamelCase :Any=3_2 ,_UpperCamelCase :Optional[Any]=2 ,_UpperCamelCase :Any=4 ,_UpperCamelCase :Any=3_7 ,_UpperCamelCase :List[str]=0.1 ,_UpperCamelCase :List[str]=0.1 ,_UpperCamelCase :Optional[int]=2_0 ,_UpperCamelCase :Tuple=2 ,_UpperCamelCase :int=1 ,_UpperCamelCase :List[Any]=0 ,):
        """Store the tiny-model hyperparameters (upstream: parent, batch_size,
        seq_length, is_training, use_labels, vocab_size, hidden_size, ...)."""
        snake_case_ : Optional[int] = parent
        snake_case_ : List[Any] = batch_size
        snake_case_ : str = seq_length
        snake_case_ : Any = is_training
        snake_case_ : str = use_labels
        snake_case_ : Dict = vocab_size
        snake_case_ : Optional[int] = hidden_size
        snake_case_ : Dict = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Any = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : Dict = max_position_embeddings
        snake_case_ : Tuple = eos_token_id
        snake_case_ : Dict = pad_token_id
        snake_case_ : str = bos_token_id

    def a__ ( self :Dict ):
        """Build a (config, inputs_dict) pair for the common tests
        (upstream name: ``prepare_config_and_inputs_for_common``)."""
        snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        # force an EOS token at the end of every sequence
        snake_case_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        snake_case_ : Tuple = tf.concat([input_ids, eos_tensor] ,axis=1 )
        snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        snake_case_ : Tuple = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        snake_case_ : Dict = prepare_mbart_inputs_dict(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
        return config, inputs_dict

    def a__ ( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :str ):
        """Run the decoder once with caching enabled (upstream name:
        ``check_decoder_model_past_large_inputs``).

        NOTE(review): duplicate ``_UpperCamelCase`` parameters (SyntaxError),
        and this ``a__`` rebinds the method of the same name above.
        """
        snake_case_ : Optional[int] = TFMBartModel(config=_UpperCamelCase ).get_decoder()
        snake_case_ : Dict = inputs_dict["""input_ids"""]

        # restrict to a single example for the decoder-only forward pass
        snake_case_ : List[str] = input_ids[:1, :]
        snake_case_ : Optional[int] = inputs_dict["""attention_mask"""][:1, :]
        snake_case_ : str = inputs_dict["""head_mask"""]
        snake_case_ : Dict = 1

        # first forward pass
        snake_case_ : Any = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,head_mask=_UpperCamelCase ,use_cache=_UpperCamelCase )

        snake_case_ : str = outputs.to_tuple()
        snake_case_ : str = past_key_values[1]
def UpperCAmelCase ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''Build the keyword-argument dict for a TFMBart forward pass, deriving any
    mask that was not supplied.

    FIX: the exported signature repeated ``lowerCamelCase_`` for every parameter
    (a SyntaxError), the body assigned each derived mask to a throwaway local,
    and the dtype was the nonexistent ``tf.inta`` (upstream: ``tf.int8``).
    Parameter names restored from the keys the function returns.
    '''
    if attention_mask is None:
        # attend to every non-padding token of the encoder input
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # always attend to the decoder start token, then mask decoder padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
    """TF MBart model test suite (config tests + decoder past-key-values check).

    NOTE(review): export-tooling damage —
    * the base list repeats ``lowercase__`` twice, which raises ``TypeError:
      duplicate base class`` at class creation (upstream bases were the
      TF model-tester mixin and the pipeline-tester mixin);
    * all five class attributes are bound to the single name ``lowercase``,
      each rebinding the previous;
    * one ``a__`` method below has duplicate ``_UpperCamelCase`` parameters
      (SyntaxError) and the four ``a__`` defs rebind each other.
    """

    lowercase : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    lowercase : int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    lowercase : Any = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowercase : str = True
    lowercase : Optional[Any] = False
    lowercase : Union[str, Any] = False

    def a__ ( self :List[str] ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :List[str] ):
        """Skip every pipeline test except feature extraction (upstream name:
        ``is_pipeline_test_to_skip``)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def a__ ( self :int ):
        """Set up the model tester and the config tester (upstream: setUp)."""
        snake_case_ : Tuple = TFMBartModelTester(self )
        snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase )

    def a__ ( self :List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def a__ ( self :Optional[Any] ):
        """Exercise the decoder past-key-values path on large inputs."""
        snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_UpperCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: EN→RO translation with facebook/mbart-large-en-ro.

    NOTE(review): export-tooling damage —
    * the three class attributes are all bound to ``lowercase`` (presumably
      upstream: ``src_text`` / ``expected_text`` / ``model_name``);
    * every method is named ``a__`` and rebinds the previous one, so the
      internal calls to ``self.tokenizer`` / ``self.model`` /
      ``self.translate_src_text`` and the final
      ``self._assert_generated_batch_equal_expected`` cannot resolve.
    """

    lowercase : Dict = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    lowercase : Optional[Any] = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    lowercase : Tuple = 'facebook/mbart-large-en-ro'

    @cached_property
    def a__ ( self :Any ):
        """Lazily load the tokenizer for the checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def a__ ( self :List[Any] ):
        """Lazily load the seq2seq model for the checkpoint."""
        snake_case_ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def a__ ( self :List[str] ,**_UpperCamelCase :Any ):
        """Translate the source text and compare against the expected output."""
        snake_case_ : Tuple = self.translate_src_text(**_UpperCamelCase )
        self.assertListEqual(self.expected_text ,_UpperCamelCase )

    def a__ ( self :Dict ,**_UpperCamelCase :Union[str, Any] ):
        """Tokenize, generate with beam search, and decode the hypotheses."""
        snake_case_ : List[Any] = self.tokenizer(self.src_text ,**_UpperCamelCase ,return_tensors="""tf""" )
        snake_case_ : str = self.model.generate(
            model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 )
        snake_case_ : Optional[int] = self.tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase )
        return generated_words

    @slow
    def a__ ( self :int ):
        """End-to-end batch translation test (slow)."""
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ : Tuple = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ , snake_case_ : Dict = emb.weight.shape
snake_case_ : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
snake_case_ : str = emb.weight.data
return lin_layer
def UpperCAmelCase ( state_dict , expert_idx=None ):
    '''Translate fairseq NllbMoe checkpoint keys to the transformers layout.

    Args:
        state_dict: mapping of fairseq parameter names to tensors.
        expert_idx: when given, ``moe_layer.experts.0`` is renamed to
            ``ffn.experts.expert_{expert_idx}`` (per-expert shard files all use
            index 0 internally); when ``None`` the expert index is preserved.

    Returns:
        A new dict with renamed keys and the original values.

    FIX: the exported signature repeated ``lowerCamelCase_`` (a SyntaxError) and
    the body read undefined names; also the conditions ``if "fc2" and ...`` /
    ``if "fc1" and ...`` tested a truthy string literal instead of membership
    (harmless here since ``str.replace`` is a no-op when the substring is
    absent, but fixed for clarity).
    '''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""" , f'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(""".fc2.""" , """.ffn.fc2.""" )
        if "fc1" in key and "experts" not in key:
            key = key.replace(""".fc1.""" , """.ffn.fc1.""" )
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
        new_dict[key] = state_dict[old_key]
    return new_dict
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :str = WEIGHTS_NAME ):
    '''Shard a fairseq NllbMoe checkpoint into per-expert weight files plus a
    shared-weights file, writing them under the dump folder and returning the
    weight-map index (upstream name: ``shard_on_the_fly``).

    NOTE(review): export-tooling damage — the signature repeats
    ``lowerCamelCase_`` for all five parameters (a SyntaxError; upstream:
    ``switch_checkpoint_path, dump_path, num_experts, dtype, weights_name``),
    and every local result is assigned to the throwaway name ``snake_case_``
    while later lines read the pre-mangling names (``sharded_state_dicts``,
    ``total_size``, ``expert_state``, ``shared_weights`` …). Restore names
    before running.
    '''
    snake_case_ : Tuple = []
    snake_case_ : Dict = 0
    os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )

    # one checkpoint file per expert rank; missing ranks are skipped
    for expert in range(lowerCamelCase_ ):
        snake_case_ : Optional[Any] = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(lowerCamelCase_ ):
            snake_case_ : List[Any] = torch.load(lowerCamelCase_ )["""model"""]
            remove_ignore_keys_(lowerCamelCase_ )
            snake_case_ : List[str] = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
            snake_case_ : List[str] = os.path.join(
                lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
            torch.save(lowerCamelCase_ , lowerCamelCase_ )
            sharded_state_dicts.append(expert_state.keys() )
            # track on-disk byte size to build the index metadata later
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(lowerCamelCase_ )[0]].dtype )

    # Add the last block
    snake_case_ : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
    snake_case_ : Tuple = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
    remove_ignore_keys_(lowerCamelCase_ )
    snake_case_ : Tuple = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
    snake_case_ : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
    sharded_state_dicts.append(shared_weights.keys() )

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(lowerCamelCase_ ) == 1:
        snake_case_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
        torch.save(lowerCamelCase_ , lowerCamelCase_ )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(lowerCamelCase_ , lowerCamelCase_ )
    # Otherwise, let's build the index
    snake_case_ : str = {}
    for idx, shard in enumerate(lowerCamelCase_ ):
        snake_case_ : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' )
        snake_case_ : Optional[int] = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
        # rename the provisional "-of-???" files now the shard count is known
        os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
        for key in shard:
            snake_case_ : Optional[int] = shard_file

    # Add the metadata
    snake_case_ : Any = {"""total_size""": total_size}
    snake_case_ : int = {"""metadata""": metadata, """weight_map""": weight_map}

    with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
        snake_case_ : List[str] = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n"""
        f.write(lowerCamelCase_ )

    return metadata, index
if __name__ == "__main__":
    # CLI driver: shard a fairseq NllbMoe checkpoint, then save a converted
    # transformers config/model to the dump folder.
    #
    # NOTE(review): export-tooling damage — the parser/args/config/model
    # variables were all mangled to ``__A`` (so the ``parser.add_argument`` and
    # ``args.*`` reads below reference undefined names), the tuple-unpacking
    # line ``__A, __A : List[Any] = ...`` is invalid syntax (an annotation on a
    # tuple target), and ``shard_on_the_fly`` was renamed ``UpperCAmelCase`` in
    # this file. Restore names before running.
    __A : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--nllb_moe_checkpoint_path',
        default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    __A : List[str] = parser.parse_args()
    __A, __A : List[Any] = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    __A : Tuple = NllbMoeConfig.from_pretrained(
        'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    __A : List[str] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __lowercase (_lowercase, _lowercase=(), _lowercase=None, _lowercase="no", _lowercase="29500" ) -> Optional[int]:
    """Launch a training function from a notebook on TPU, one/multi GPU, MPS or CPU
    (upstream name: ``notebook_launcher``).

    NOTE(review): export-tooling damage — the signature repeats ``_lowercase``
    for every parameter (a SyntaxError; upstream: ``function, args,
    num_processes, mixed_precision, use_port``), the in-Kaggle/in-Colab flags
    were both mangled to ``__lowerCamelCase`` while later lines read
    ``in_colab`` / ``in_kaggle``, and the first error message reads
    ``args.mixed_precision`` although no ``args`` object exists in scope.
    Restore names before running.
    """
    __lowerCamelCase : Dict = False
    __lowerCamelCase : Any = False
    # detect the hosted-notebook environment we are running in
    if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
        __lowerCamelCase : str = True
    elif "IPython" in sys.modules:
        __lowerCamelCase : Optional[int] = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
    try:
        __lowerCamelCase : Optional[int] = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", __lowercase ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        # an already-initialized AcceleratorState means an Accelerator was built
        # outside the training function — refuse, since fork would inherit it
        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
                """your training function. Restart your notebook and make sure no cells initializes an """
                """`Accelerator`.""" )
        if num_processes is None:
            __lowerCamelCase : str = 8

        __lowerCamelCase : Any = PrepareForLaunch(__lowercase, distributed_type="""TPU""" )
        print(f"Launching a training on {num_processes} TPU cores." )
        xmp.spawn(__lowercase, args=__lowercase, nprocs=__lowercase, start_method="""fork""" )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("""Launching training on one GPU.""" )
        else:
            print("""Launching training on one CPU.""" )
        function(*__lowercase )
    else:
        if num_processes is None:
            raise ValueError(
                """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
                    """inside your training function. Restart your notebook and make sure no cells initializes an """
                    """`Accelerator`.""" )
            # CUDA must not be initialized before fork, or the children will crash
            if torch.cuda.is_initialized():
                raise ValueError(
                    """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
                    """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
                    """function.""" )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            # NOTE(review): "127.0.01" looks like a typo for "127.0.0.1" — confirm
            # against upstream before changing.
            with patch_environment(
                world_size=__lowercase, master_addr="""127.0.01""", master_port=__lowercase, mixed_precision=__lowercase ):
                __lowerCamelCase : Union[str, Any] = PrepareForLaunch(__lowercase, distributed_type="""MULTI_GPU""" )
                print(f"Launching training on {num_processes} GPUs." )
                try:
                    start_processes(__lowercase, args=__lowercase, nprocs=__lowercase, start_method="""fork""" )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
                            """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
                            """Please review your imports and test them when running the `notebook_launcher()` to identify """
                            """which one is problematic.""" ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                __lowerCamelCase : Dict = """1"""
                print("""Launching training on MPS.""" )
            elif torch.cuda.is_available():
                print("""Launching training on one GPU.""" )
            else:
                print("""Launching training on CPU.""" )
            function(*__lowercase )
def __lowercase (_lowercase, _lowercase=(), _lowercase=2 ) -> List[Any]:
    """Fork-launch a function across a small number of CPU processes for
    debugging distributed code (upstream name: ``debug_launcher``).

    NOTE(review): the exported signature repeats ``_lowercase`` for all three
    parameters (a SyntaxError; upstream: ``function, args, num_processes``).
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        # NOTE(review): "127.0.01" looks like a typo for "127.0.0.1" — confirm.
        with patch_environment(
            world_size=__lowercase, master_addr="""127.0.01""", master_port="""29500""", accelerate_mixed_precision="""no""", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="""yes""", ):
            __lowerCamelCase : List[Any] = PrepareForLaunch(__lowercase, debug=__lowercase )
            start_processes(__lowercase, args=__lowercase, nprocs=__lowercase, start_method="""fork""" )
| 150 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCAmelCase_ ( _lowercase ):
    """Fourth-order Improved Pseudo Numerical Methods for Diffusion Models
    (iPNDM) scheduler, following Algorithm 2 / formulas (9), (12), (13) of
    https://arxiv.org/pdf/2202.09778.pdf.

    FIX(review): the exported class was irreparably mangled — it listed the same
    base twice (``TypeError: duplicate base class``; upstream bases are the
    scheduler and config mixins, both mangled to ``_lowercase`` here, so a
    single base is kept), every ``__init__``/``set_timesteps``/``step``
    signature repeated ``_SCREAMING_SNAKE_CASE`` (SyntaxError), all methods
    were named ``__lowercase`` (rebinding each other while ``__init__`` still
    called ``self.set_timesteps``), and ``torch.atana`` does not exist
    (upstream: ``torch.atan2``). Names restored from the upstream
    ``IPNDMScheduler``.
    """

    # interchangeable with schedulers of solver order 1
    UpperCAmelCase__ = 1

    @register_to_config
    def __init__( self , num_train_timesteps = 1_000 , trained_betas = None ) -> Tuple:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values (previous noise predictions for the multistep formula)
        self.ets = []

    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        """Precompute betas/alphas and the timestep schedule for inference."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # FIX: upstream uses torch.atan2 here; ``torch.atana`` does not exist
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )

        self.ets = []

    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        """Advance ``sample`` one step using a 4th-order linear-multistep update
        (warm-started with lower orders while fewer than 4 ets are available)."""
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )

        # Adams–Bashforth-style coefficients, orders 1..4
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self , sample , *args , **kwargs ) -> torch.FloatTensor:
        # iPNDM needs no input scaling; kept for scheduler API compatibility
        return sample

    def _get_prev_sample( self , sample , timestep_index , prev_timestep_index , ets ) -> Tuple:
        """One transfer step: invert the noising at the current index, then
        re-noise at the previous (next-in-time) index."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # guard against division by a vanishing alpha
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
| 383 | 0 |
class __lowerCAmelCase :
    """Binary-search-tree node holding a value and left/right children.

    FIX(review): the exported methods referenced names that no longer exist —
    ``__init__`` assigned from the undefined ``val``, and the insert method
    constructed ``Node`` and recursed via ``.insert`` although the class and
    method were renamed. Child nodes are now built via ``type(self)`` and
    recursion goes through the method's actual name.
    """

    def __init__( self , __UpperCAmelCase ):
        # node payload and (initially empty) subtrees
        self.val = __UpperCAmelCase
        self.left = None
        self.right = None

    def lowerCamelCase( self , __UpperCAmelCase ):
        """Insert a value into the subtree rooted at this node (BST order);
        an equal value just overwrites the payload (no duplicate nodes)."""
        if self.val:
            if __UpperCAmelCase < self.val:
                if self.left is None:
                    self.left = type(self )(__UpperCAmelCase )
                else:
                    self.left.lowerCamelCase(__UpperCAmelCase )
            elif __UpperCAmelCase > self.val:
                if self.right is None:
                    self.right = type(self )(__UpperCAmelCase )
                else:
                    self.right.lowerCamelCase(__UpperCAmelCase )
            else:
                self.val = __UpperCAmelCase
def a__ ( root , res ):
    """Recursive in-order traversal: append the values of the BST rooted at
    ``root`` (objects with ``val``/``left``/``right``) into the list ``res``.

    FIX: the exported signature repeated ``_UpperCamelCase`` (a SyntaxError) and
    recursed through the nonexistent name ``inorder``.
    """
    if root:
        a__(root.left , res )
        res.append(root.val )
        a__(root.right , res )
def a__ ( arr ):
    """Tree sort: insert every element into a BST, then read it back in order.

    Equal values collapse to a single occurrence (matching the original BST's
    insert semantics, which overwrite on equality). An empty input is returned
    unchanged.

    FIX: the exported body called the nonexistent names ``Node``, ``insert``
    and ``inorder`` (the node class/method/traversal were all renamed by the
    export tooling), so the BST is built here with plain dict nodes to keep the
    function self-contained.
    """
    if len(arr ) == 0:
        return arr
    # Build the BST with dict nodes: {"val", "left", "right"}
    root = {"val": arr[0], "left": None, "right": None}
    for item in arr[1:]:
        node = root
        while True:
            if item < node["val"]:
                if node["left"] is None:
                    node["left"] = {"val": item, "left": None, "right": None}
                    break
                node = node["left"]
            elif item > node["val"]:
                if node["right"] is None:
                    node["right"] = {"val": item, "left": None, "right": None}
                    break
                node = node["right"]
            else:
                # equal value: overwrite, do not insert a duplicate node
                node["val"] = item
                break
    # Iterative in-order traversal collects the sorted values
    res = []
    stack, cur = [], root
    while stack or cur:
        while cur:
            stack.append(cur )
            cur = cur["left"]
        cur = stack.pop()
        res.append(cur["val"] )
        cur = cur["right"]
    return res
if __name__ == "__main__":
    # FIX: the sorter in this module is named ``a__`` — there is no
    # ``tree_sort``.
    print(a__([10, 1, 3, 2, 9, 14, 13]))
| 710 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# module logger (name mangled by the export tooling)
a_ = getLogger(__name__)
# NOTE(review): this rebinds ``a_``, clobbering the logger above. Upstream this
# constant was ``DEFAULT_DEVICE`` — which the default argument of the function
# below still references, so as exported that default is an undefined name.
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
    """Run batched seq2seq generation over the input examples and write one
    hypothesis per line to the save path, returning timing statistics
    (upstream name: ``generate_summaries_or_translations``).

    NOTE(review): export-tooling damage — every parameter shares the name
    ``_UpperCamelCase`` (a SyntaxError; upstream: ``examples, out_file,
    model_name, batch_size, device, fp16, task, prefix, **generate_kwargs``),
    the default ``DEFAULT_DEVICE`` is undefined (the constant above was mangled
    to ``a_``), and every local result is assigned to the throwaway name
    ``__lowerCamelCase`` while later lines read the pre-mangling names
    (``fout``, ``model``, ``tokenizer``, ``start_time`` …). Restore before
    running.
    """
    __lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
    __lowerCamelCase = str(_UpperCamelCase )
    __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
    if fpaa:
        __lowerCamelCase = model.half()

    __lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.

    __lowerCamelCase = time.time()
    # update config with task specific params
    use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
    if prefix is None:
        __lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
    # generate batch-by-batch, flushing each batch of hypotheses to disk
    for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
        __lowerCamelCase = [prefix + text for text in examples_chunk]
        __lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
        __lowerCamelCase = model.generate(
            input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
        __lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    __lowerCamelCase = int(time.time() - start_time )  # seconds
    __lowerCamelCase = len(_UpperCamelCase )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
    """Return the current local time as a ``YYYY-MM-DD HH:MM:SS`` string."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
    # NOTE(review): the single parameter is the `verbose` flag (the entry point
    # is invoked below as `run_generate(verbose=True)`); an automated rewrite
    # renamed it and collapsed every assignment target to `__lowerCamelCase`,
    # so later reads of `parser`, `args`, `parsed_args`, `examples`, `runtime_metrics`,
    # `score_fn`, `output_lns`, `reference_lns`, `scores` are unbound — restore before use.
    """Parse CLI arguments, run generation over `input_path`, optionally score the
    hypotheses against `--reference_path`, and dump metrics to `--score_path`.

    Returns the metrics dict ({} when no reference file is given).
    """
    __lowerCamelCase = argparse.ArgumentParser()
    parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
    parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' ,action='''store_true''' )
    parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) ,)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    __lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
    __lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""" )
    # T5 tokenizers expect a leading space on each source line
    __lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        __lowerCamelCase = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    # NOTE(review): argparse registers `--fp16` (store_true), so the attribute is
    # `args.fp16`; `args.fpaa` here and below looks like a bad automated rename — confirm.
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    __lowerCamelCase = generate_summaries_or_translations(
        _UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
    if args.reference_path is None:
        return {}
    # Compute scores
    __lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
    __lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
    # clip references to the number of generated hypotheses (n_obs may truncate)
    __lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
    __lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
    scores.update(_UpperCamelCase )
    if args.dump_args:
        scores.update(_UpperCamelCase )
    if args.info:
        __lowerCamelCase = args.info
    if verbose:
        print(_UpperCamelCase )
    if args.score_path is not None:
        json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` is not defined under that name in this file —
    # the automated rewrite renamed the entry point above to `a__`; confirm.
    run_generate(verbose=True)
| 622 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison-operator symbols (as they appear in pip requirement strings)
# mapped to the corresponding `operator` module callables.
lowerCamelCase_ = {
    '''<''': operator.lt,
    '''<=''': operator.le,
    '''==''': operator.eq,
    '''!=''': operator.ne,
    '''>=''': operator.ge,
    '''>''': operator.gt,
}
def snake_case ( op ,got_ver ,want_ver ,requirement ,pkg ,hint ):
    """Compare an installed version against a wanted one using operator `op`.

    Fix: the automated rewrite gave all six parameters the same name (``A__``),
    which is a SyntaxError; the names are restored from the body's references
    (call order: op, got_ver, want_ver, requirement, pkg, hint).

    Raises:
        ValueError: when either version string is missing.
        ImportError: when the installed version does not satisfy `requirement`.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            F""" reinstalling {pkg}.""" )
    # `ops` (module-level) maps "<", "<=", "==", "!=", ">=", ">" to operator callables
    if not ops[op](version.parse(got_ver ) ,version.parse(want_ver ) ):
        raise ImportError(
            F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def snake_case ( requirement ,hint = None ):
    """Check that a pip-style requirement (e.g. ``tokenizers==0.9.4`` or
    ``python>=3.7``) is satisfied by the running environment.

    Fixes vs. the automated rewrite: the two parameters shared one name
    (SyntaxError) — restored to (requirement, hint); every local assignment
    target had been collapsed to ``UpperCAmelCase_`` leaving ``pkg``,
    ``wanted``, ``match``, ``got_ver`` etc. unbound; ``str(A__)`` in the
    Python-version join is ``str(x)``; and ``wanted[op] = want_ver`` is
    restored so the collected constraints are actually checked.

    Raises:
        ValueError: malformed requirement string or unknown operator.
        importlib.metadata.PackageNotFoundError: package is not installed.
        ImportError: installed version violates the requirement (via the
            comparison helper).
    """
    hint = F"""\n{hint}""" if hint is not None else ""
    # non-versioned check: a bare package name just has to be installed
    if re.match(r"^[\w_\-\d]+$" ,requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" ,requirement )
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                F""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split("," )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)" ,w )
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    F""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case: "python" is checked against the interpreter version, not pip metadata
    if pkg == "python":
        got_ver = ".".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op ,got_ver ,want_ver ,requirement ,pkg ,hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op ,got_ver ,want_ver ,requirement ,pkg ,hint )
def snake_case ( requirement ):
    """Check a requirement needed by transformers core, with an install hint.

    Fix: the automated rewrite assigned the hint message to a throwaway local
    and then passed `requirement` as BOTH arguments; the hint is now passed as
    the second argument as intended.
    NOTE(review): `require_version` is not defined under that name in this
    rewritten file (it was renamed to `snake_case` above) — confirm the callee.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement ,hint )
| 95 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# NOTE(review): both globals were renamed to `lowercase__`, so the dict clobbers
# the logger; presumably these were `logger` and `MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowercase__ = logging.get_logger(__name__)
# checkpoint name -> hosted config URL
lowercase__ = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
    """Configuration class for Marian MT models: stores the architecture
    hyper-parameters (vocab sizes, layer/head counts, dropout rates, ...).

    NOTE(review): an automated rewrite collapsed every attribute-assignment
    target in ``__init__`` to the local name ``snake_case``, so no ``self.*``
    attribute is actually set; restore ``self.vocab_size = vocab_size`` etc.
    before use.
    """

    # model_type identifier
    lowerCamelCase = """marian"""
    # keys ignored at inference time
    lowerCamelCase = ["""past_key_values"""]
    # canonical attribute names -> config field names
    lowerCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : Any , UpperCamelCase__ : Dict=5_8101 , UpperCamelCase__ : int=None , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Tuple=4096 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Tuple=5_8100 , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : str=5_8100 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : int=0 , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : int , ) -> int:
        """Store architecture hyper-parameters and forward the generation/token
        ids (pad/eos/decoder_start/forced_eos) to the base config."""
        snake_case : List[Any] = vocab_size
        # decoder may use its own vocabulary; defaults to the encoder's
        snake_case : Optional[int] = decoder_vocab_size or vocab_size
        snake_case : int = max_position_embeddings
        snake_case : Tuple = d_model
        snake_case : int = encoder_ffn_dim
        snake_case : Optional[int] = encoder_layers
        snake_case : Union[str, Any] = encoder_attention_heads
        snake_case : List[str] = decoder_ffn_dim
        snake_case : List[str] = decoder_layers
        snake_case : List[str] = decoder_attention_heads
        snake_case : Any = dropout
        snake_case : Optional[Any] = attention_dropout
        snake_case : Tuple = activation_dropout
        snake_case : Union[str, Any] = activation_function
        snake_case : int = init_std
        snake_case : Dict = encoder_layerdrop
        snake_case : Dict = decoder_layerdrop
        snake_case : List[Any] = use_cache
        snake_case : int = encoder_layers
        snake_case : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
        snake_case : str = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for Marian seq2seq models (mirrors the Bart
    ONNX config): declares dynamic input/output axes and builds dummy inputs
    for tracing, with and without `past_key_values`.

    NOTE(review): an automated rewrite collapsed every assignment target to the
    local name ``snake_case``, so names read later (``common_inputs``,
    ``common_outputs``, ``decoder_inputs`` ...) are unbound; restore the
    original targets before use. Also note this class shares the name
    ``snake_case__`` with the config class above — a rename artifact.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def lowerCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model inputs, depending on the export task
        (``default``/``seq2seq-lm``, ``causal-lm``, or anything else) and on
        whether cached ``past_key_values`` are fed."""
        if self.task in ["default", "seq2seq-lm"]:
            snake_case : Any = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                # with a cache, the decoder consumes one new token per step
                snake_case : List[Any] = {0: '''batch'''}
                snake_case : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                snake_case : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
                snake_case : Any = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            snake_case : Any = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                snake_case ,snake_case : Union[str, Any] = self.num_layers
                for i in range(UpperCamelCase__ ):
                    snake_case : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    snake_case : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            snake_case : Dict = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model outputs; extends the base spec with
        present-key/value axes when a cache is used."""
        if self.task in ["default", "seq2seq-lm"]:
            snake_case : Any = super().outputs
        else:
            # bypass OnnxSeq2SeqConfigWithPast and use the plain with-past outputs
            snake_case : int = super(UpperCamelCase__ , self ).outputs
            if self.use_past:
                snake_case ,snake_case : Optional[Any] = self.num_layers
                for i in range(UpperCamelCase__ ):
                    snake_case : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    snake_case : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and, when `use_past`, zeroed
        past_key_values tensors) for the default / seq2seq-lm tasks."""
        snake_case : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Generate decoder inputs
        snake_case : Optional[int] = seq_length if not self.use_past else 1
        snake_case : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        snake_case : Dict = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        snake_case : Any = dict(**UpperCamelCase__ , **UpperCamelCase__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            snake_case ,snake_case : Dict = common_inputs['''input_ids'''].shape
            snake_case : Any = common_inputs['''decoder_input_ids'''].shape[1]
            snake_case ,snake_case : Tuple = self.num_attention_heads
            snake_case : str = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # decoder past length is padded (+3) so the mask covers future steps
            snake_case : Union[str, Any] = decoder_seq_length + 3
            snake_case : Union[str, Any] = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            snake_case : Optional[int] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
            snake_case : Dict = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            snake_case ,snake_case : str = self.num_layers
            snake_case : Union[str, Any] = min(UpperCamelCase__ , UpperCamelCase__ )
            snake_case : Union[str, Any] = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
            snake_case : str = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(UpperCamelCase__ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase__ ),
                        torch.zeros(UpperCamelCase__ ),
                        torch.zeros(UpperCamelCase__ ),
                        torch.zeros(UpperCamelCase__ ),
                    ) )
            # TODO: test this.
            snake_case : Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
        return common_inputs

    def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and zeroed past_key_values) for the
        causal-lm task."""
        snake_case : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            snake_case ,snake_case : Optional[Any] = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            snake_case : int = seqlen + 2
            snake_case ,snake_case : int = self.num_layers
            snake_case ,snake_case : List[Any] = self.num_attention_heads
            snake_case : Optional[int] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            snake_case : Tuple = common_inputs['''attention_mask'''].dtype
            snake_case : Optional[int] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
            snake_case : str = [
                (torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
            ]
        return common_inputs

    def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Tokenize a batch of dummy sentences, resolving dynamic (-1) batch and
        sequence sizes to fixed defaults to avoid ONNX optimizations."""
        snake_case : Tuple = compute_effective_axis_dimension(
            UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        snake_case : List[str] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
        snake_case : str = compute_effective_axis_dimension(
            UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
        # Generate dummy inputs according to compute batch and sequence
        snake_case : str = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        snake_case : Union[str, Any] = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
        return common_inputs

    def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific builder."""
        if self.task in ["default", "seq2seq-lm"]:
            snake_case : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
        else:
            snake_case : Tuple = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
        return common_inputs

    def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
        """Flatten past_key_values, choosing the seq2seq or plain with-past
        flattening depending on the task."""
        if self.task in ["default", "seq2seq-lm"]:
            snake_case : List[Any] = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else:
            snake_case : Optional[int] = super(UpperCamelCase__ , self )._flatten_past_key_values_(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> float:
        """Absolute tolerance used when validating exported-model outputs."""
        return 1e-4
| 638 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# The CLI trainer requires at least one deep-learning backend at import time.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
# NOTE(review): both flags were renamed to `lowerCamelCase_` (second clobbers the
# first); presumably USE_XLA and USE_AMP — confirm against the original file.
lowerCamelCase_ : Dict = False
lowerCamelCase_ : str = False
def __lowercase( __snake_case : Namespace ) -> List[str]:
    """Factory used by argparse's ``set_defaults(func=...)`` to instantiate the
    train command from parsed arguments.

    NOTE(review): it returns a TrainCommand instance, so the ``-> List[str]``
    annotation is wrong; also ``TrainCommand`` was renamed to ``_lowerCamelCase``
    in this rewritten file, leaving this reference undefined — confirm and fix.
    """
    return TrainCommand(__snake_case )
class _lowerCamelCase (lowerCamelCase ):
    """`transformers-cli train` command: registers its argparse sub-parser,
    loads the task pipeline and CSV datasets, and dispatches training to the
    TF or PyTorch runner.

    NOTE(review): an automated rewrite collapsed every assignment target to
    ``__snake_case``, so names read later (``train_parser``, ``self.logger``,
    ``self.framework``, ``self.pipeline`` ...) are unbound; restore the
    original targets before use.
    """

    @staticmethod
    def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
        """Register the `train` sub-command and its arguments on `parser`."""
        __snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
        train_parser.add_argument(
            '--column_label' , type=SCREAMING_SNAKE_CASE_ , default=0 , help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' , type=SCREAMING_SNAKE_CASE_ , default=2 , help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE_ , default='' , help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' , type=SCREAMING_SNAKE_CASE_ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE_ , default='./' , help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' , type=SCREAMING_SNAKE_CASE_ , default='text_classification' , help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=32 , help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=64 , help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=3E-5 , help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE_ , default=1E-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )

    def __init__( self , SCREAMING_SNAKE_CASE_ ):
        """Resolve the backend, load the task pipeline and the train/validation
        datasets from the parsed CLI namespace."""
        __snake_case = logging.get_logger('transformers-cli/training' )
        # prefer TF when both backends are installed
        __snake_case = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE_ )
        __snake_case = args.output
        __snake_case = args.column_label
        __snake_case = args.column_text
        __snake_case = args.column_id
        self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            __snake_case = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'''Loading dataset from {args.train_data}''' )
        __snake_case = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        __snake_case = None
        if args.validation_data:
            self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
            __snake_case = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        __snake_case = args.validation_split
        __snake_case = args.train_batch_size
        __snake_case = args.valid_batch_size
        __snake_case = args.learning_rate
        __snake_case = args.adam_epsilon

    def __lowerCamelCase ( self ):
        """Dispatch to the backend-specific training runner."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def __lowerCamelCase ( self ):
        # PyTorch training is not implemented for this command.
        raise NotImplementedError

    def __lowerCamelCase ( self ):
        """Fit the TF pipeline on the loaded datasets and save the result."""
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 345 |
import os
import sys
import unittest
# Repository root: three directory levels above this test file.
# NOTE(review): the targets below were renamed to `lowerCamelCase_`, yet the
# next lines read `git_repo_path` — rename artifact; restore before running.
lowerCamelCase_ : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCamelCase_ : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _lowerCamelCase (unittest.TestCase ):
    """Unit tests for the `check_dummies` utility: backend detection in init
    files, reading the backend->objects map, and dummy-object generation."""

    def __lowerCamelCase ( self ):
        """`find_backend` extracts the backend name(s) from an availability-guard line."""
        __snake_case = find_backend(' if not is_torch_available():' )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch' )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        __snake_case = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch_and_transformers' )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        __snake_case = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch_and_transformers_and_onnx' )

    def __lowerCamelCase ( self ):
        """`read_init` returns a backend -> dummy-object-names mapping."""
        __snake_case = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , SCREAMING_SNAKE_CASE_ )
        self.assertIn('torch_and_transformers' , SCREAMING_SNAKE_CASE_ )
        self.assertIn('flax_and_transformers' , SCREAMING_SNAKE_CASE_ )
        self.assertIn('torch_and_transformers_and_onnx' , SCREAMING_SNAKE_CASE_ )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel' , objects['torch'] )
        self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
        self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
        self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
        self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
        self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )

    def __lowerCamelCase ( self ):
        """`create_dummy_object` emits a placeholder constant/function/class."""
        __snake_case = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , '\nCONSTANT = None\n' )
        __snake_case = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            SCREAMING_SNAKE_CASE_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
        __snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
        __snake_case = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def __lowerCamelCase ( self ):
        """`create_dummy_files` produces a full autogenerated dummy module per backend."""
        __snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        __snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE_ )
| 345 | 1 |
from bisect import bisect
from itertools import accumulate
def __lowerCAmelCase ( vl ,wt ,w ,n ):
    """Fractional knapsack: maximum value attainable with capacity `w`, given
    item values `vl`, item weights `wt`, and item count `n` (== len(vl)).

    Fixes vs. the automated rewrite: the four parameters shared one name
    (SyntaxError) — restored to (vl, wt, w, n); the sort key's lambda parameter
    had been renamed while its body still used ``x`` (NameError);
    ``reverse=True`` restored so items are taken in decreasing value/weight
    ratio, which the greedy algorithm requires.
    """
    # sort (value, weight) pairs by value density, best first
    r = sorted(zip(vl ,wt ) ,key=lambda x: x[0] / x[1] ,reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    # running total of weights; bisect finds how many items fit whole
    acc = list(accumulate(wt ) )
    k = bisect(acc ,w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 367 |
# Fix: the automated rewrite assigned BOTH dicts to the same name
# `lowerCamelCase`, clobbering the first and leaving `UNIT_SYMBOL` and
# `METRIC_CONVERSION` — the names the converter below reads — undefined.
# The original names are restored here.

# Full metric unit name -> SI symbol, used to normalise user-supplied names.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def __lowerCAmelCase ( value ,from_type ,to_type ):
    """Convert a metric length `value` from unit `from_type` to unit `to_type`
    (full names or SI symbols, case-insensitive, optional trailing "s").

    Fix: the automated rewrite gave all three parameters the same name
    (SyntaxError); restored to (value, from_type, to_type) per the body's uses,
    and the misplaced inner references resolved accordingly.

    Raises:
        ValueError: when either unit is not a known metric length unit.
    """
    # normalise: lower-case, strip a plural "s", then map full names to symbols
    from_sanitized = from_type.lower().strip("s" )
    to_sanitized = to_type.lower().strip("s" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized ,from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized ,to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    # scale by the difference of the two units' powers of ten
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 ,exponent )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 367 | 1 |
def _UpperCamelCase ( power = 1_0_0_0 ) ->int:
    """Return the sum of the decimal digits of ``2**power`` (Project Euler 16;
    the default power=1000 yields 1366).

    Fix: the automated rewrite renamed the parameter to ``lowerCAmelCase_``
    while the body still read ``power`` (NameError) — name restored.
    """
    n = 2**power  # the number whose digits we sum
    r = 0
    while n:
        # peel off the last digit and accumulate it
        r , n = r + n % 1_0, n // 1_0
    return r
if __name__ == "__main__":
    # Fix: the automated rewrite left a call to the old name `solution`, which
    # does not exist in this file; call the renamed function instead.
    print(_UpperCamelCase(int(str(input()).strip())))
| 627 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both globals were renamed to `__a`, so the dict clobbers the
# logger; presumably `logger` and `NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP`.
__a = logging.get_logger(__name__)
# checkpoint name -> hosted config URL
__a = {
    """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class __lowercase ( PretrainedConfig ):
    r"""Configuration class for an NLLB-MoE seq2seq model.

    Defaults reproduce the facebook/nllb-moe-54b checkpoint.

    Fixes over the minified original: the base class name and the standard
    `PretrainedConfig` class attributes were restored, and `__init__` had all
    parameters bound to a single duplicated name (a SyntaxError) while the body
    referenced the lost parameter names.
    """

    # NOTE(review): attribute names restored from the PretrainedConfig
    # contract; the minified source bound all three to one shadowed name.
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=1_2_8_1_1_2,
        max_position_embeddings=1_0_2_4,
        encoder_layers=1_2,
        encoder_ffn_dim=4_0_9_6,
        encoder_attention_heads=1_6,
        decoder_layers=1_2,
        decoder_ffn_dim=4_0_9_6,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1_0_2_4,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=1_2_8,
        expert_capacity=6_4,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ) -> None:
        """Store model/MoE hyper-parameters and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 627 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
# Pillow / textwrap are only needed for rendering VQA header text.
if is_vision_available():
    import textwrap
    from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
    import torch
    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Without torch the version flag defaults to False.
    snake_case : Dict = False
snake_case : int = logging.get_logger(__name__)
# Hub repo hosting the default TTF font used for header rendering.
# NOTE(review): the three `snake_case` bindings above shadow one another —
# they were presumably distinct names before minification; verify.
snake_case : List[Any] = 'ybelkada/fonts'
def lowercase__ ( ):
    """Raise ImportError unless the installed torch is at least 1.11.0."""
    torch_is_recent_enough = (not is_torch_available()) or is_torch_greater_or_equal_than_1_11
    if torch_is_recent_enough:
        return
    raise ImportError(
        F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
        """Pix2StructImageProcessor. Please upgrade torch.""" )
def lowercase__ ( image_tensor , patch_height , patch_width ):
    """Split *image_tensor* (C, H, W) into non-overlapping patches.

    Returns a tensor of shape
    ``(1, H // patch_height, W // patch_width, C * patch_height * patch_width)``.

    Fix: the minified original declared three identically named parameters
    (a SyntaxError) and lost the intermediate variable names.
    """
    requires_backends(lowercase__ , ["""torch"""] )
    _check_torch_version()
    # Add a batch dimension expected by F.unfold.
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    # Reorder so patches are laid out row-major as (rows, cols, flattened patch).
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
def lowercase__ ( text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
    """Render *text* onto a new PIL image, wrapped at 80 characters.

    The font is taken from *font_bytes*, else *font_path*, else downloaded
    from the Hub. Fix: the minified original repeated one parameter name for
    every argument (a SyntaxError) and lost the local variable names.
    """
    requires_backends(lowercase__ , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = """\n""".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        # "ybelkada/fonts" is the module's default font repo on the Hub.
        font = hf_hub_download("ybelkada/fonts" , """Arial.TTF""" )
    font = ImageFont.truetype(font , encoding="""UTF-8""" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , background_color ) )
    _, _, text_width, text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def lowercase__ ( image , header , **kwargs ):
    """Render *header* text and paste it above *image*; return a numpy array.

    Extra keyword arguments are forwarded to the text renderer.
    Fix: the minified original duplicated parameter names (a SyntaxError)
    and lost the local variable names.
    """
    requires_backends(lowercase__ , """vision""" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    # Rescale both parts to the common width, preserving aspect ratios.
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class lowerCamelCase__( BaseImageProcessor ):
    r"""Image processor for Pix2Struct.

    Optionally renders a text header above each image (VQA mode), normalizes,
    then converts each image into a fixed-length sequence of flattened patches
    prefixed with their 1-based (row, col) coordinates.

    Fixes over the minified original: every method declared duplicated
    parameter names (a SyntaxError), local variable names were lost, the base
    class name was lost, and dtype attributes were mangled
    (``np.uinta``/``np.floataa``/``torch.floataa`` do not exist).
    """

    model_input_names = ["flattened_patches"]

    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2_0_4_8 , is_vqa = False , **kwargs , ):
        """Store preprocessing defaults; *patch_size* defaults to 16x16."""
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {"""height""": 1_6, """width""": 1_6}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches( self , image , max_patches , patch_size , **kwargs ):
        """Resize *image* so ~max_patches patches fit, then flatten patches.

        Returns a (max_patches, 2 + C*ph*pw) numpy array; unused rows are
        zero-padded, and columns 0/1 hold 1-based row/col ids.
        """
        requires_backends(self.extract_flattened_patches , """torch""" )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height, patch_width = patch_size["""height"""], patch_size["""width"""]
        image_height, image_width = get_image_size(image )
        # maximize scale s.t. the resized image holds at most max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result

    def normalize( self , image , data_format = None , **kwargs ):
        """Zero-mean, unit-ish-std normalize *image* (std floored at 1/sqrt(N))."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )

    def preprocess( self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Full pipeline: (optional header render) -> normalize -> patches.

        Returns a BatchFeature with ``flattened_patches`` and ``attention_mask``.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("""data_format""" , None ) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """ )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""" )
            font_bytes = kwargs.pop("""font_bytes""" , None )
            font_path = kwargs.pop("""font_path""" , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy: 1 where a patch row is non-zero.
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
| 566 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
snake_case : Dict = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
snake_case : Any = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
snake_case : Optional[int] = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def lowercase__ ( preds , labels ):
    """Mean elementwise-equality accuracy between two equal-shape arrays.

    Fix: the minified original declared two identically named parameters,
    which is a SyntaxError.
    """
    # (preds == labels) yields a boolean array; its mean is the accuracy.
    return float((preds == labels).mean() )
def lowercase__ ( preds , labels ):
    """Return both accuracy and binary F1 for the given predictions.

    Fix: the minified original declared two identically named parameters
    (a SyntaxError) and passed undefined names to the scorers.
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def lowercase__ ( en_sentvecs , in_sentvecs ):
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, retrieval succeeds when the matching
    Indic vector (same row index) ranks in the 10 nearest by cosine distance.

    Fix: the minified original declared two identically named parameters
    (a SyntaxError) and lost all local variable names.
    """
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , """cosine""" )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__( datasets.Metric ):
    """IndicGLUE metric: accuracy, accuracy+F1, or precision@10 per subset.

    Fixes over the minified original: the two hook methods must be named
    `_info` / `_compute` for the `datasets.Metric` API to find them, and
    `_compute` declared two identically named parameters (a SyntaxError).
    """

    def _info( self ):
        """Declare the metric's features; vector features for cvit-mkb-clsr."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" )
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""" ) ),
                    """references""": datasets.Value("""int64""" )
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""" ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )

    def _compute( self , predictions , references ):
        """Dispatch to the scorer appropriate for the configured subset."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""" )
| 566 | 1 |
def UpperCamelCase__ ( profit: list , weight: list , max_weight: int ):
    """Fractional (greedy) knapsack: maximum profit within *max_weight*.

    Items may be taken fractionally; the greedy order is by descending
    profit/weight ratio.

    Raises:
        ValueError: on mismatched lengths, non-positive capacity, or
            negative profits/weights.

    Fix: the minified original declared three parameters with the same name
    (a SyntaxError) and referenced lost local names.
    """
    if len(profit ) != len(weight ):
        raise ValueError("""The length of profit and weight must be same.""" )
    if max_weight <= 0:
        raise ValueError("""max_weight must greater than zero.""" )
    if any(p < 0 for p in profit ):
        raise ValueError("""Profit can not be negative.""" )
    if any(w < 0 for w in weight ):
        raise ValueError("""Weight can not be negative.""" )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1  # mark as consumed so .index skips it next time
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    print(
        'Input profits, weights, and then max_weight (all positive ints) separated by '
        'spaces.'
    )
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
    # Function Call
    # Fix: the minified original called undefined `calc_profit` with undefined
    # argument names; the implementation in this module is `UpperCamelCase__`.
    UpperCamelCase__(profit, weight, max_weight)
| 571 |
# NOTE(review): both aliases are bound to the same name `_a`, so the second
# assignment overwrites the first; these were presumably two distinct aliases
# (a 3-D point type and a 3-D vector type) before minification — verify.
_a : str = tuple[float, float, float]
_a : List[Any] = tuple[float, float, float]
def UpperCamelCase__ ( end_pointa: tuple , end_pointb: tuple ) -> tuple:
    """Return the 3-D vector from *end_pointa* to *end_pointb*.

    Fix: the minified original declared two identically named parameters
    (a SyntaxError) and subtracted each point from itself, always yielding
    (0, 0, 0).
    """
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def UpperCamelCase__ ( ab: tuple , ac: tuple ) -> tuple:
    """Return the cross product ab x ac of two 3-D vectors.

    Fix: the minified original declared two identically named parameters
    (a SyntaxError) while the body used the lost names `ab`/`ac`.
    """
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def UpperCamelCase__ ( vector: tuple , accuracy: int ) -> bool:
    """True when every component of *vector* rounds to 0 at *accuracy* digits.

    Fix: the minified original rounded the wrong (duplicated) parameter and
    had two identically named parameters (a SyntaxError).
    """
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def UpperCamelCase__ ( point_a: tuple , point_b: tuple , point_c: tuple , accuracy: int = 10 ) -> bool:
    """True when three 3-D points are collinear (cross of AB and AC is ~0).

    Fix: the minified original declared four parameters with one duplicated
    name (a SyntaxError). The helper names below match the calls in the
    original body.
    """
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
| 571 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast, tiny-model checks for the unconditional LDMPipeline.

    Fix: minification renamed every member to `_lowercase`, so the
    `self.dummy_*` lookups in the test body could not resolve; member names
    are restored to match those lookups.
    """

    @property
    def dummy_uncond_unet( self ):
        """Tiny UNet2DModel, seeded so weights are reproducible."""
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
        return model

    @property
    def dummy_vq_model( self ):
        """Tiny VQModel, seeded so weights are reproducible."""
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=3 ,)
        return model

    @property
    def dummy_text_encoder( self ):
        """Tiny CLIP text encoder, seeded so weights are reproducible."""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
        return CLIPTextModel(config )

    def test_inference_uncond( self ):
        """Dict and tuple outputs of the pipeline must match a pinned slice."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type="numpy" ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test against the real CompVis/ldm-celebahq-256 checkpoint."""

    def test_inference_uncond( self ):
        """Generated image must match a pinned slice within tolerance."""
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=5 ,output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        # Fix: a stray dataset artifact ("| 46 |") fused into the original
        # final assertion made it a SyntaxError; it has been removed.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
# Expected on-disk file names for the slow tokenizer's assets.
UpperCAmelCase_ : Union[str, Any] = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
# Model id -> hosted asset URL maps for pretrained checkpoints.
UpperCAmelCase_ : List[Any] = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
# Maximum positional-embedding length per checkpoint.
# NOTE(review): the four `UpperCAmelCase_` bindings in this header shadow one
# another — they were distinct constants before minification; verify.
UpperCAmelCase_ : str = {'facebook/blenderbot-3B': 1_2_8}
class _lowerCamelCase ( PreTrainedTokenizerFast ):
    r"""Fast (tokenizers-backed) Blenderbot tokenizer.

    Fixes over the minified original: the base class and the standard
    `PreTrainedTokenizerFast` class-attribute names were restored; `__init__`
    and several methods declared duplicated parameter names (SyntaxErrors);
    the `mask_token` property name was restored (required by the
    `@mask_token.setter` decorator that follows it); and the remaining hook
    method names were restored from the `PreTrainedTokenizerFast` contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """Build the fast tokenizer and sync prefix-space / offset options
        into the backend pre-tokenizer and post-processor state."""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        """Return the mask token string, or None (with a log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        """Set the mask token; plain strings keep their leading space (lstrip)."""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ):
        """Forward to the fast backend after validating pretokenized usage."""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        """Forward to the fast backend after validating pretokenized usage."""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """Save the backend model files and return their paths as a tuple."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_a_a=None ):
        """Blenderbot uses a single all-zero token-type sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep ) * [0]

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_a_a=None ):
        """Blenderbot just appends EOS to the (first) sequence."""
        return token_ids_a + [self.eos_token_id]

    def _build_conversation_input_ids( self , conversation ):
        """Flatten a Conversation into ids, trimming to model_max_length."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
| 365 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _lowercase ( text ):
    """Print the first- and second-order Shannon entropy (in bits) of *text*,
    then their difference. Only space + lowercase ASCII letters contribute.
    """
    # Character and bigram frequencies (self-contained; equivalent to analyze_text:
    # every character once, plus a leading " " + first character bigram).
    single_char_strings = Counter(text)
    two_char_strings = Counter([" " + text[0]] + [text[i : i + 2] for i in range(len(text) - 1)])
    my_alphas = list(" " + ascii_lowercase)

    # First-order entropy: H1 = -sum(p * log2(p)) over single characters.
    all_sum = sum(single_char_strings.values())
    my_fir_sum = 0.0
    for ch in my_alphas:
        if ch in single_char_strings:
            prob = single_char_strings[ch] / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula (fix: log2, not loga)
    print(f"{round(-1 * my_fir_sum):.1f}")

    # Second-order entropy over character pairs.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    for cha in my_alphas:
        for chb in my_alphas:  # fix: inner loop needs its own variable
            sequence = cha + chb
            if sequence in two_char_strings:
                prob = two_char_strings[sequence] / all_sum
                my_sec_sum += prob * math.log2(prob)
    print(f"{round(-1 * my_sec_sum):.1f}")

    # Difference between the two entropy estimates.
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def _lowercase ( text ):
    """Return ``(single_char_counts, two_char_counts)`` for *text*.

    Counts every character once (last char counted up front, the rest in the
    loop). The bigram counter additionally includes ``" " + text[0]``.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def _lowercase ( ):
    """Entry point: run this module's doctests."""
    import doctest

    doctest.testmod()
    # Sample text kept from the original for manual experimentation:
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. ..."
    # )
    # calculate_prob(text)
if __name__ == "__main__":
main()
| 713 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_: str = logging.get_logger(__name__)
lowercase_: List[str] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowercase__ (PretrainedConfig ):
    """Configuration class for ViT-MSN models.

    Stores the hyper-parameters of a ViT-MSN encoder; defaults match
    ``sayakpaul/vit-msn-base``.
    """

    # Registered model type key used by AutoConfig.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
'''simple docstring'''
from __future__ import annotations
import time
lowerCAmelCase_ : Any = list[tuple[int, int]]
lowerCAmelCase_ : List[str] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCAmelCase_ : Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class Node:
    """A grid cell on a search path.

    Stores its own coordinates (``pos`` is ``(y, x)``), the goal coordinates,
    and a parent link used to retrace the path. The rest of this module refers
    to this class as ``Node``.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level ``grid`` using ``delta`` moves.

    The rest of this module refers to this class as ``BreadthFirstSearch`` and
    calls ``search`` / ``get_successors`` / ``retrace_path``.
    """

    def __init__(self, start, goal):
        # Node takes (pos_x, pos_y, ...), while start/goal are given as (y, x).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the found path, or ``[start]`` when unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back from *node*; return the start→node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Meet-in-the-middle BFS: expand from start and goal simultaneously.

    The module's ``__main__`` block refers to this class as
    ``BidirectionalBreadthFirstSearch`` and calls ``search``.
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Expand one node per direction per round until the frontiers meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each half-search aims at the other frontier's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward and backward half-paths at the meeting point."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node appears in both halves
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    # Time the unidirectional search.
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)

    # Time the bidirectional search on the same grid.
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 435 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule name -> public symbols; consumed lazily by _LazyModule below so
# heavy framework imports only happen when a symbol is actually accessed.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers; mirrors _import_structure exactly.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from itertools import permutations
def snake_case_ ( num : tuple) -> bool:
    """Return True if the pandigital digit tuple has the Project Euler 43
    substring-divisibility property: d2d3d4 % 2, d3d4d5 % 3, d4d5d6 % 5, then
    d5d6d7 % 7, d6d7d8 % 11, d7d8d9 % 13, d8d9d10 % 17 are all zero.
    """
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    # fix: enumerate the prime list, not the input tuple
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def snake_case_ ( n : int = 10) -> int:
    """Project Euler 43: sum of all 0-to-(n-1) pandigital numbers whose 3-digit
    substrings satisfy the successive prime-divisibility property.

    The divisibility check is inlined as a private helper so this function is
    self-contained.
    """

    def _is_substring_divisible(num):
        # d4 even; d3+d4+d5 % 3; d6 % 5; then windows against 7, 11, 13, 17.
        if num[3] % 2 != 0:
            return False
        if (num[2] + num[3] + num[4]) % 3 != 0:
            return False
        if num[5] % 5 != 0:
            return False
        for i, test in enumerate([7, 11, 13, 17]):
            if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
                return False
        return True

    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if _is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 606 | '''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def snake_case_ ( model ) -> int:
    """Return the number of trainable (requires_grad) parameters of *model*."""
    model_parameters = filter(lambda p: p.requires_grad , model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
A_ : Union[str, Any] =logging.getLogger(__name__)
def snake_case_ ( output_dir : str , metric : str) -> "ModelCheckpoint":
    """Build a ModelCheckpoint that keeps the best checkpoint by ``val_{metric}``.

    Supports rouge2 / bleu / em (maximized) and loss; raises for other metrics.
    """
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    elif metric == "loss":
        exp = '''{val_avg_loss:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ''' function.''')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def snake_case_ ( metric : str , patience : int) -> "EarlyStopping":
    """Build an EarlyStopping callback on ``val_{metric}``.

    Loss-like metrics are minimized; everything else is maximized.
    """
    return EarlyStopping(
        monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class __UpperCAmelCase ( pl.Callback ):
    """Lightning callback that logs learning rates and parameter counts and
    writes test results/generations to the output directory."""

    def on_batch_end(self, trainer, pl_module):
        # Log the current LR of every param group of the first optimizer.
        lrs = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """Dump callback metrics (and optionally predictions) to text files."""
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / '''test_results.txt'''
            generations_file = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , '''a+''' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'''{key}: {val:.6f}\n'''
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , '''test''' )

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 606 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( arr , n , r , index , data , i ):
    """Recursively fill ``data`` with size-*r* combinations of ``arr[0:n]``,
    printing each complete combination.

    ``index`` is the next slot of ``data`` to fill; ``i`` is the next candidate
    element of ``arr``.
    """
    # A full combination has been assembled; print it.
    if index == r:
        for j in range(r ):
            print(data[j] , end=' ' )
        print(' ' )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    lowerCAmelCase_(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    lowerCAmelCase_(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase_ ( arr , n , r ):
    """Print all combinations of size *r* drawn from ``arr[0:n]``.

    Self-contained: the include/exclude recursion is a private nested helper.
    """

    def _combination_util(index, data, i):
        # Recursive include/exclude enumeration over arr, filling data.
        if index == r:
            for j in range(r):
                print(data[j], end=' ')
            print(' ')
            return
        if i >= n:
            return
        data[index] = arr[i]
        _combination_util(index + 1, data, i + 1)
        _combination_util(index, data, i + 1)

    # A temporary array to store one combination at a time.
    data = [0] * r
    _combination_util(0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
__A : Union[str, Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 394 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class _UpperCamelCase ( PretrainedConfig ):
    """Configuration class for GPTSAN-japanese models.

    Defaults match ``tanreinama/GPTSAN-2.8B-spout_is_uniform``.
    """

    # Registered model type key used by AutoConfig.
    model_type = "gptsan-japanese"
    # Outputs ignored when probing model signatures at inference time.
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    # Canonical attribute names -> config field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=3_6000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=3_5998,
        pad_token_id=3_5995,
        eos_token_id=3_5999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total layer count is the sum of switch and extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class lowerCAmelCase_ ( TokenClassificationTask ):
    """NER task over CoNLL-formatted files (one ``token label...`` line each)."""

    def __init__(self, label_idx=-1):
        # Column index (from the end) that holds the label on each line.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Parse ``{mode}.txt`` in *data_dir* into InputExample objects."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                # Blank lines / document markers terminate the current example.
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Copy the test file to *writer*, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def get_labels(self, path):
        """Read labels (one per line) from *path*; default to CoNLL-2003 tags."""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase_ ( lowerCAmelCase_ ):
    """Chunking task: like the NER reader but labels live in the second-to-last
    column, and the default label set is CoNLL-2000 chunk tags.

    NOTE(review): the module reuses one class name for several classes, so this
    subclasses the immediately preceding (NER) class definition.
    """

    def __init__(self):
        # Chunk labels are in the second-to-last column of each CoNLL line.
        super().__init__(label_idx=-2 )

    def get_labels(self, path):
        """Read labels from *path*; default to the CoNLL-2000 chunk tag set."""
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowerCAmelCase_ ( TokenClassificationTask ):
    """POS-tagging task over CoNLL-U files parsed with ``conllu.parse_incr``."""

    def read_examples_from_file(self, data_dir, mode):
        """Parse ``{mode}.txt`` (CoNLL-U) into InputExample objects with UPOS labels."""
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , F"""{mode}.txt""" )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write each sentence as ``form (upos|prediction)`` annotations."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                # fix: outer single quotes so the double-quoted keys parse on Python < 3.12
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels(self, path):
        """Read labels from *path*; default to the 17 universal POS tags."""
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 104 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols; resolved lazily by _LazyModule below.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers; mirrors _import_structure exactly.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from pathlib import Path
import fire
from tqdm import tqdm
def _snake_case ( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ):
    """Download a WMT translation pair with 🤗 datasets and write one
    ``{split}.source`` / ``{split}.target`` file pair per split.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets" )
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records' )
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f'{fn}.source' )
        tgt_path = save_dir.joinpath(f'{fn}.target' )
        src_fp = src_path.open("w+" )
        tgt_fp = tgt_path.open("w+" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n" )
            tgt_fp.write(ex[tgt_lang] + "\n" )
        # fix: close the handles so the data is flushed per split
        src_fp.close()
        tgt_fp.close()
    print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 216 |
def lowerCAmelCase_ ( stra : str , strb : str ) -> float:
    '''Return the Jaro-Winkler similarity of two strings, in [0.0, 1.0].

    Combines the Jaro similarity (matches within a sliding window, halved
    transposition count) with a bonus for a shared prefix of up to 4 chars.
    '''

    def get_matched_characters(_stra : str , _strb : str ) -> str:
        # Characters of _stra that occur in _strb within the match window.
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                # Blank out the consumed character so it cannot match twice.
                _strb = f'{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'
        return "".join(matched )

    # matching characters
    matching_1 = get_matched_characters(stra , strb )
    matching_2 = get_matched_characters(strb , stra )
    match_count = len(matching_1 )

    # transposition: matched chars appearing in a different order, halved
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1 , matching_2 ) if ca != cb] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
| 124 | 0 |
def __UpperCamelCase ( number : int ) -> bool:
    """Return True when *number* is even, by testing its lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense count vector indexed by token id; ids absent from the corpus stay 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v  # fix: write into the list instead of rebinding it

    logger.info(F"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 326 | 1 |
"""simple docstring"""
# Lazy import wiring for the UnCLIP pipelines: fall back to the dummy
# placeholder objects when torch or transformers >= 4.25.0 is unavailable.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies raise a helpful installation error when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 155 |
"""simple docstring"""
def _lowerCAmelCase ( n = 100 ):
    """Project Euler 6: return (1 + ... + n)^2 - (1^2 + ... + n^2)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 155 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __a ( Dataset ):
    """Trivial dataset of the integers ``0 .. length-1``; item *i* is *i*."""

    def __init__(self, length = 1_01):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class __a :
    """Collate a list of indices into model inputs; labels mirror the inputs."""

    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class __a ( nn.Module ):
    """Dummy model: echoes its inputs, with a constant zero loss when labels
    are provided."""

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_20 , 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device), input_ids
        else:
            return input_ids
class __a ( TestCasePlus ):
    """Launches this file under torchrun (2 processes) on a NeuronCore host."""

    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""--output_dir {output_dir}""".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """Launches this very file under ``torchrun`` on every available GPU.

    Any failure in the sub-process surfaces as an error raised by
    ``execute_subprocess_async``.
    """

    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowercase_ = HfArgumentParser((TrainingArguments,))
lowercase_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
lowercase_ = DummyDataset(dataset_length)
def __lowerCAmelCase ( __lowerCamelCase : EvalPrediction ) -> List[str]:
__lowerCAmelCase =list(range(len(snake_case__ ) ) )
__lowerCAmelCase =p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
lowercase_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowercase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase_ = 2
lowercase_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase_ = None
| 700 |
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place using gnome sort and return it.

    Walks the list, swapping adjacent out-of-order elements and stepping
    back after each swap; O(n^2) worst case.

    Args:
        lst: list of mutually comparable items.

    Returns:
        The same list object, sorted in ascending order.
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 456 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Import the slow (sentencepiece-backed) tokenizer only when the optional
# dependency is installed; otherwise fall back to a None placeholder.
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # NOTE(review): obfuscated name — this placeholder presumably stood for
    # ``NllbTokenizer = None``; verify against the original module.
    __UpperCAmelCase = None
# Module-level logger and tokenizer resource tables.
# NOTE(review): the obfuscation collapsed several distinct constant names onto
# ``__UpperCAmelCase`` (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES); only the last binding survives —
# confirm against the original file.
__UpperCAmelCase = logging.get_logger(__name__)
# Local filenames for the slow/fast tokenizer assets.
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Remote URLs for the pretrained tokenizer assets.
__UpperCAmelCase = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum model input sizes (positional embedding capacity) per checkpoint.
__UpperCAmelCase = {
    '''facebook/nllb-large-en-ro''': 1_024,
    '''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
__UpperCAmelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', 
'''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class lowerCAmelCase_ ( a__ ):
    # NOTE(review): obfuscated class — structurally this is the fast (Rust-backed)
    # NLLB tokenizer. Several distinct attribute/method names were collapsed by the
    # obfuscation onto ``UpperCAmelCase__`` and ``snake_case_``; in Python only the
    # LAST binding of a repeated name survives, and the duplicated
    # ``SCREAMING_SNAKE_CASE_`` parameters in the signatures below are a
    # SyntaxError — confirm against the original module before relying on this.
    UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
    UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
    UpperCAmelCase__ : int = NllbTokenizer
    # Prefix/suffix special-token id lists, rebuilt by the set-*-lang methods below.
    UpperCAmelCase__ : List[int] = []
    UpperCAmelCase__ : List[int] = []
    # Constructor: wires vocab/tokenizer files, special tokens, source/target
    # language codes, and the legacy-behaviour flag into the fast tokenizer.
    def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
        UpperCamelCase : Dict = legacy_behaviour
        super().__init__(
            vocab_file=SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_, additional_special_tokens=SCREAMING_SNAKE_CASE_, legacy_behaviour=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
        UpperCamelCase : Optional[int] = vocab_file
        # Slow-tokenizer export is only possible when the sentencepiece model file exists.
        UpperCamelCase : int = False if not self.vocab_file else True
        UpperCamelCase : int = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        # Map each FLORES-200 language code to its vocabulary id.
        UpperCamelCase : Optional[int] = {
            lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        # Default to English as source language when none is given.
        UpperCamelCase : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
        UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
        UpperCamelCase : Any = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    # Property: current source-language code.
    @property
    def snake_case_ ( self ) -> str:
        return self._src_lang
    # Setter: switch source language and rebuild the special-token template.
    @src_lang.setter
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        UpperCamelCase : Dict = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    # build_inputs_with_special_tokens: wrap token ids with the language
    # prefix/suffix special tokens.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    # create_token_type_ids_from_sequences: NLLB uses all-zero token type ids.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        UpperCamelCase : Dict = [self.sep_token_id]
        UpperCamelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # _build_translation_inputs: tokenize raw text for translation, stamping the
    # forced BOS id of the target language onto the encoding.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        UpperCamelCase : str = src_lang
        UpperCamelCase : List[str] = self(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[Any] = tgt_lang_id
        return inputs
    # prepare_seq2seq_batch: record source/target languages then defer to the base class.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "eng_Latn", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "fra_Latn", **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
        UpperCamelCase : Union[str, Any] = src_lang
        UpperCamelCase : str = tgt_lang
        return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
    # _switch_to_input_mode / _switch_to_target_mode hooks.
    def snake_case_ ( self ) -> str:
        return self.set_src_lang_special_tokens(self.src_lang )
    def snake_case_ ( self ) -> str:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    # set_src_lang_special_tokens: legacy mode puts the language code AFTER eos;
    # non-legacy puts it first.  Rebuilds the fast-tokenizer post-processor.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        UpperCamelCase : List[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        if self.legacy_behaviour:
            UpperCamelCase : Union[str, Any] = []
            UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase : Optional[int] = [self.cur_lang_code]
            UpperCamelCase : List[str] = [self.eos_token_id]
        UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
        UpperCamelCase : List[Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    # set_tgt_lang_special_tokens: same as above but for the target language.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
        UpperCamelCase : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        if self.legacy_behaviour:
            UpperCamelCase : Optional[Any] = []
            UpperCamelCase : Any = [self.eos_token_id, self.cur_lang_code]
        else:
            UpperCamelCase : int = [self.cur_lang_code]
            UpperCamelCase : Tuple = [self.eos_token_id]
        UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        UpperCamelCase : str = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
    # save_vocabulary: copy the sentencepiece model into ``save_directory`` so a
    # slow tokenizer can be reconstructed from the saved files.
    def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        UpperCamelCase : Tuple = os.path.join(
            SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,)
| 40 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops fairseq-only weights, renames keys to the HF OPT layout and splits
    the fused ``qkv_proj`` weights into separate q/k/v projections.

    Args:
        checkpoint_path: path to a ``torch.save``-d checkpoint, optionally
            wrapped in a top-level ``"model"`` key.

    Returns:
        A state dict ready for ``OPTModel.load_state_dict``.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into a HuggingFace `OPTModel` dump.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config: optional path/name of an ``OPTConfig``; a default config is
            used when omitted.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_A = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 290 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step ``scheduler`` ``num_steps`` times, recording the lr before each step.

    Returns:
        List of the first param group's learning rate at each step.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like ``unwrap_schedule`` but save/reload the scheduler state midway.

    Exercises ``state_dict``/``load_state_dict`` round-tripping at the halfway
    step so that save/reload bugs show up as diverging learning rates.

    Returns:
        List of the first param group's learning rate at each step.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __a ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : int) ->Optional[int]:
"""simple docstring"""
self.assertEqual(len(lowercase__) , len(lowercase__))
for a, b in zip(lowercase__ , lowercase__):
self.assertAlmostEqual(lowercase__ , lowercase__ , delta=lowercase__)
def _UpperCAmelCase ( self : str) ->Tuple:
"""simple docstring"""
_lowercase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase__)
_lowercase = torch.tensor([0.4, 0.2, -0.5])
_lowercase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_lowercase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0)
for _ in range(1_00):
_lowercase = criterion(lowercase__ , lowercase__)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
def _UpperCAmelCase ( self : Any) ->Union[str, Any]:
"""simple docstring"""
_lowercase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase__)
_lowercase = torch.tensor([0.4, 0.2, -0.5])
_lowercase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_lowercase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowercase__ , weight_decay=0.0 , relative_step=lowercase__ , scale_parameter=lowercase__ , warmup_init=lowercase__ , )
for _ in range(10_00):
_lowercase = criterion(lowercase__ , lowercase__)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """Checks each LR schedule against its expected per-step learning rates,
    and that every schedule survives a save/reload round trip."""

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """Assert two equal-length float lists match element-wise within ``tol``."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Picklable wrapper around a schedule lambda.

    ``LambdaLR.state_dict`` cannot pickle raw lambdas; wrapping each
    ``lr_lambda`` in an instance of this class makes them picklable while
    preserving their behavior.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        """Replace every ``lr_lambda`` on ``scheduler`` with a wrapped copy."""
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 713 |
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    Args:
        n_term: number of terms, as a string (e.g. user input). An empty
            string yields an empty list.

    Returns:
        ["1", "1/2", "1/3", ...] with ``int(n_term)`` entries.
    """
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        # First term is "1"; every later term is "1/k".
        series.append(f"""1/{temp + 1}""" if series else """1""")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 572 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a : Dict = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
# Generated protobuf module tail (from protoc for sentencepiece_model.proto).
# NOTE(review): the obfuscation collapsed the generated names (DESCRIPTOR,
# _globals, _TRAINERSPEC serialized options, and the per-message
# _serialized_start/_end offsets) onto the single name ``a`` — as written,
# DESCRIPTOR and _globals below are undefined; confirm against a fresh
# protoc-generated file.
a : List[str] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    # Pure-Python descriptor path: set serialized options by hand.
    a : Dict = None
    a : int = B"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # Byte offsets of each message/enum within the serialized file descriptor
    # (presumably the *_serialized_start/_serialized_end pairs — TODO confirm).
    a : Dict = 45
    a : List[Any] = 1_581
    a : Tuple = 1_517
    a : Dict = 1_570
    a : Any = 1_584
    a : Optional[Any] = 1_793
    a : Union[str, Any] = 1_795
    a : Tuple = 1_916
    a : Any = 1_864
    a : Dict = 1_905
    a : List[str] = 1_919
    a : List[Any] = 2_429
    a : Tuple = 2_208
    a : Union[str, Any] = 2_418
    a : Any = 2_323
    a : Tuple = 2_407
# @@protoc_insertion_point(module_scope)
| 63 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config archive map for the openai-gpt checkpoint.
# NOTE(review): obfuscation reuses the name ``a`` for both; only the second binding survives.
a : Tuple = logging.get_logger(__name__)
a : List[str] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    The defaults reproduce the architecture of the ``openai-gpt`` checkpoint.

    Args:
        vocab_size: vocabulary size.
        n_positions: maximum sequence length the model can handle.
        n_embd: hidden size of the embeddings and layers.
        n_layer: number of transformer layers.
        n_head: number of attention heads.
        afn: activation function name.
        resid_pdrop / embd_pdrop / attn_pdrop: dropout probabilities.
        layer_norm_epsilon: epsilon used by the layer-norm layers.
        initializer_range: std of the truncated-normal weight initializer.
        summary_*: options for the sequence-summary head.
    """

    model_type = "openai-gpt"
    # Canonical HF attribute names -> GPT-specific attribute names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_a: int = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_a: str = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_a: Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCamelCase(datasets.Metric):
    """
    Translation Edit Rate (TER) metric, delegating to sacrebleu's TER
    implementation.

    NOTE(review): the originals' method names (`__A`) and parameter names
    (duplicate `lowerCAmelCase` arguments — a SyntaxError) were obfuscated;
    restored to the ``datasets.Metric`` contract (`_info`/`_compute`) with the
    parameter names documented in ``_KWARGS_DESCRIPTION``.
    """

    def _info(self):
        # Fail fast if the installed sacrebleu predates the TER API.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        """Score *predictions* against *references* and return TER statistics."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one stream per reference position, so transpose.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from __future__ import annotations
def print_distance(distance, src):
    """Print every vertex index with its shortest distance from *src*.

    NOTE(review): the original declared ``def __lowerCAmelCase(A, A)`` — a
    duplicate-argument SyntaxError — and the name shadowed two sibling
    functions while the ``__main__`` driver calls ``print_distance``; restored
    the name and distinct parameters.
    """
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph, distance, edge_count):
    """Return True if one further relaxation pass still improves any distance,
    i.e. the graph contains a cycle with negative total weight reachable from
    the source used to build *distance*.

    NOTE(review): original had duplicate parameter names (SyntaxError) and an
    obfuscated function name; restored to the name its callers use.
    """
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        # Only relax edges whose tail has already been reached.
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph, vertex_count, edge_count, src):
    """Return the list of shortest distances from *src* to every vertex.

    graph: list of edge dicts with keys "src", "dst", "weight".
    Raises Exception("Negative cycle found") if a negative cycle is reachable.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # |V| - 1 relaxation passes suffice for shortest paths without cycles.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    if check_negative_cycle(graph, distance, edge_count):
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive driver: read E edges as {"src", "dst", "weight"} dicts,
    # then run Bellman-Ford from the requested source.
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        # NOTE(review): the original bound the edge dict to a throwaway name
        # (and used an invalid annotated tuple-assignment above it); store the
        # edge at index i so the graph is actually populated.
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_lowerCamelCase : Dict = datasets.logging.get_logger(__name__)
_lowerCamelCase : List[Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_lowerCamelCase : int = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_lowerCamelCase : Any = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    """Build the per-document structures CoVal's evaluator consumes.

    Wraps the key (gold) and system CoNLL line lists into single-document
    dicts, extracts mention clusters with ``coval.conll.reader``, optionally
    prunes NPs/min-spans/nested mentions, and returns
    ``{doc: (key_clusters, sys_clusters, key_mention_sys_cluster,
    sys_mention_key_cluster)}``.

    NOTE(review): original signature repeated the name ``lowercase_`` for all
    seven parameters (a SyntaxError) and dropped the ``doc_coref_infos[doc]``
    subscript; restored per the defaults visible in the obfuscated header
    (False, False, True, False, "dummy_doc").
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # NOTE(review): original also passed key_doc_lines here (not
        # sys_doc_lines) — preserved as-is; confirm against upstream coval.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )

    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the key with every metric in *metrics*.

    *metrics* is a list of ``(name, metric_fn)`` pairs understood by
    ``coval.eval.evaluator.evaluate_documents``. Returns a dict of
    ``{name}/recall``, ``{name}/precision`` and ``{name}/f1`` entries, plus
    ``conll_score`` (average F1 of muc/bcub/ceafe, scaled to 0-100) when all
    three are present.

    NOTE(review): original repeated the parameter name ``lowercase_`` seven
    times (a SyntaxError) and used the obfuscated name shared with its
    siblings; restored to the name the metric class calls.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        # The CoNLL score is the average F1 of exactly these three metrics.
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1})

        logger.info(
            name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {f1 * 100:.2f}""" , )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'''conll_score''': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse annotation.

    Scans the first non-comment data line: column 6 (index 5) holds the parse
    bit in the CoNLL format; a value other than "-" means gold parses are
    present. Lines starting with "#" are skipped; a line with too few columns
    ends the scan.

    NOTE(review): the original read the column into one obfuscated local but
    tested a different, undefined name (`parse_col`) and never kept the result
    flag; restored the straightforward flow.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_(datasets.Metric):
    """
    CoVal coreference metric wrapper (MUC, B-cubed, CEAFe, LEA, mentions and
    the averaged CoNLL score).

    NOTE(review): method names were obfuscated and ``_compute`` repeated the
    name ``UpperCAmelCase__`` for every parameter (a SyntaxError); restored to
    the ``datasets.Metric`` contract (`_info`/`_compute`) with parameter names
    from ``_KWARGS_DESCRIPTION`` and the defaults visible in the obfuscated
    header (True, False, False, False).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''')),
                    '''references''': datasets.Sequence(datasets.Value('''string''')),
                }
            ),
            codebase_urls=['''https://github.com/ns-moosavi/coval'''],
            reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Run all CoVal sub-metrics on *predictions* vs *references*."""
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            # Minimum-span evaluation needs gold parses in the reference file.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 87 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256  # was `shaaaa`: hashlib has no such member, so the module failed at import
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
# RFC 3526 MODP groups, keyed by group id. Renamed from the obfuscated
# `__lowerCAmelCase`: every consumer in this module references `primes`
# (and a double-underscore module global would be name-mangled inside the
# class below, so the old name was unreachable there).
primes = {
    # 1536-bit
    5: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
            + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
            + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
            + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
            + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
            + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
            + '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
    # 2048-bit
    14: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
            + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
            + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
            + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
            + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
            + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
            + '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
            + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
            + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
            + '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
    # 3072-bit
    15: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
            + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
            + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
            + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
            + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
            + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
            + '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
            + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
            + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
            + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
            + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
            + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
            + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
            + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
            + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
    # 4096-bit
    16: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
            + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
            + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
            + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
            + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
            + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
            + '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
            + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
            + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
            + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
            + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
            + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
            + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
            + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
            + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
            + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
            + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
            + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
            + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
            + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
            + 'FFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
    # 6144-bit
    17: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
            + '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
            + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
            + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
            + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
            + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
            + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
            + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
            + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
            + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
            + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
            + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
            + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
            + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
            + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
            + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
            + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
            + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
            + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
            + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
            + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
            + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
            + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
            + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
            + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
            + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
            + '6DCC4024FFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
    # 8192-bit
    18: {
        'prime': int(
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
            + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
            + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
            + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
            + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
            + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
            + '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
            + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
            + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
            + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
            + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
            + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
            + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
            + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
            + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
            + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
            + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
            + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
            + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
            + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
            + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
            + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
            + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
            + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
            + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
            + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
            + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
            + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
            + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
            + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
            + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
            + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
            + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
            + '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
            + '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
            + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
            + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
            + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
            + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
            + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
            + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
            + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
            base=16,
        ),
        'generator': 2,
    },
}
class DiffieHellman:
    """
    Finite-field Diffie-Hellman key exchange over the RFC 3526 MODP groups.

    NOTE(review): the original class was obfuscated beyond function — every
    method shared one name (so only the last definition survived), ``__init__``
    bound its values to throwaway locals instead of ``self``, and the static
    path referenced the undefined name ``DiffieHellman``. Restored a
    self-consistent class under that referenced name, and replaced the broken
    ``shaaaa`` digest with ``sha256``.
    """

    def __init__(self, group: int = 14) -> None:
        """Select a MODP group from `primes` and draw a fresh private key."""
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        # 32 random bytes -> 256-bit private exponent.
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a bare hex string (no '0x' prefix)."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return g^x mod p as a bare hex string."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56:
        # in range, and of prime order (Legendre symbol equals 1).
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's hex public key (SHA-256 hex digest)."""
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        """Derive the shared secret from hex key strings without an instance."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 466 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the chinese_clip sub-package.
# NOTE(review): the original rebound `snake_case_` itself for each optional
# backend (clobbering the structure), passed the undefined name
# `_import_structure` to _LazyModule, and never installed the lazy module in
# sys.modules; restored the standard lazy-init pattern.
snake_case_ = {
    """configuration_chinese_clip""": [
        """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """ChineseCLIPConfig""",
        """ChineseCLIPOnnxConfig""",
        """ChineseCLIPTextConfig""",
        """ChineseCLIPVisionConfig""",
    ],
    """processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_["""feature_extraction_chinese_clip"""] = ["""ChineseCLIPFeatureExtractor"""]
    snake_case_["""image_processing_chinese_clip"""] = ["""ChineseCLIPImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_["""modeling_chinese_clip"""] = [
        """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ChineseCLIPModel""",
        """ChineseCLIPPreTrainedModel""",
        """ChineseCLIPTextModel""",
        """ChineseCLIPVisionModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], snake_case_, module_spec=__spec__)
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class a__(metaclass=DummyObject):
    """Import-time stand-in that raises a helpful error when torch/transformers/onnx are missing."""

    # Read by the DummyObject metaclass to report which backends are required.
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 355 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Hub ids of the four Stable Diffusion v1.x checkpoints being compared.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class _lowerCAmelCase(DiffusionPipeline):
    """Runs the same text-to-image request through Stable Diffusion v1.1-v1.4.

    v1.1-v1.3 are loaded from the Hub; v1.4 is assembled from the components
    passed to the constructor. ``__call__`` returns the first image from each
    checkpoint so the four versions can be compared side by side.
    """

    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
        requires_safety_checker=True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        # The v1.4 pipeline is built from the components handed to this constructor.
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self):
        """All registered module attributes (config keys not starting with '_')."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        """Enable sliced attention computation on the v1.4 UNet to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable attention slicing (compute attention in one step again)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.1 checkpoint."""
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.2 checkpoint."""
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.3 checkpoint."""
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Generate with the Stable Diffusion v1.4 checkpoint."""
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        """Run all four checkpoints and return one image per version."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
'''simple docstring'''
import math
# Project Euler 493: urn of 70 balls, 10 of each of 7 colours.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours among
    ``num_picked`` balls drawn without replacement from the 70-ball urn.

    Uses linearity of expectation:
    E = NUM_COLOURS * (1 - P(a given colour is entirely missing)).
    Returns the expectation formatted to nine decimal places.
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


# Backward-compatible alias for the previous (obfuscated) name of this function.
a = solution
if __name__ == "__main__":
    # Expected answer for the default draw of 20 balls: 6.818741802
    print(solution(20))
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Hub config locations for CamemBERT-architecture checkpoints.
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    Defaults reproduce the `camembert-base` checkpoint hyper-parameters.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self):
        """Input spec for export: dynamic axes for input_ids / attention_mask.

        Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 711 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """Builds a tiny MegatronBert config plus dummy inputs and checks output shapes per task head."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random token ids, optional masks/labels and a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each (batch, seq) input to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shape/config test-suite for the MegatronBert model family."""

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # Pretraining models additionally need MLM labels and a next-sentence label.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a LongTensor from a nested token-id list on the current torch_device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
# Relative/absolute tolerance for fp16 integration-test comparisons.
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """Slow integration test against the 345M Megatron-BERT checkpoint."""

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        # Spot-check the first 3x3 block of hidden states against reference values.
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 143 | 0 |
'''simple docstring'''
import os
def largest_product(grid):
    """Largest product of four adjacent numbers in a square grid
    (vertically, horizontally, or along either diagonal).

    Project Euler problem 11. Assumes an n x n grid of ints.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    """Read the 20x20 grid from grid.txt (next to this file) and return
    the largest product of four adjacent numbers (Project Euler 11)."""
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in row] for row in grid]
    return largest_product(grid)
if __name__ == "__main__":
    # Print the Project Euler 11 answer computed from grid.txt.
    print(solution())
| 262 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
# Type alias: an undirected edge is a pair of vertex ids.
EdgeT = tuple[int, int]
class Graph:
    """Weighted undirected graph with edges stored as {(min, max): weight}."""

    def __init__(self, vertices, edges) -> None:
        """``vertices`` is a set of ints; ``edges`` maps vertex pairs to weights."""
        self.vertices = vertices
        # Normalise every edge key so (a, b) and (b, a) collapse to one entry.
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight) -> None:
        """Add an edge (creating its endpoints if needed) with the given weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Return a minimum spanning tree of this graph (Prim's algorithm).

        Grows a subgraph from the smallest vertex, repeatedly attaching the
        cheapest edge that crosses the cut.
        """
        subgraph = Graph({min(self.vertices)}, {})

        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than any real edge weight.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the subgraph.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achieved by replacing the given
    network with its minimum spanning tree.

    The file holds a comma-separated adjacency matrix with '-' marking
    absent edges. Returns initial total weight minus MST total weight.
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
        adjacency_matrix = [line.split(",") for line in data]

    # The matrix is symmetric; read only the strictly lower triangle.
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
    # Project Euler 107: weight saved by using the minimum spanning tree.
    print(F'''{solution() = }''')
| 273 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Fixed seed: the conversion script compares model outputs on random inputs,
# so the run must be deterministic.
set_seed(770)

# Mapping from layer names in the original suno/bark checkpoints to the
# names used by the Hugging Face Bark implementation.
new_layer_name_dict = {
    'c_attn': 'att_proj',
    'c_proj': 'out_proj',
    'c_fc': 'in_proj',
    'transformer.': '',
    'h.': 'layers.',
    'ln_1': 'layernorm_1',
    'ln_2': 'layernorm_2',
    'ln_f': 'layernorm_final',
    'wpe': 'position_embeds_layer',
    'wte': 'input_embeds_layer',
}

# Hub locations of the original Bark checkpoints, per sub-model and size.
REMOTE_MODEL_PATHS = {
    'text_small': {
        'repo_id': 'suno/bark',
        'file_name': 'text.pt',
    },
    'coarse_small': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse.pt',
    },
    'fine_small': {
        'repo_id': 'suno/bark',
        'file_name': 'fine.pt',
    },
    'text': {
        'repo_id': 'suno/bark',
        'file_name': 'text_2.pt',
    },
    'coarse': {
        'repo_id': 'suno/bark',
        'file_name': 'coarse_2.pt',
    },
    'fine': {
        'repo_id': 'suno/bark',
        'file_name': 'fine_2.pt',
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
# Local download cache for the original checkpoints (mirrors bark's layout).
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of an original Bark checkpoint.

    Args:
        model_type: One of ``"text"``, ``"coarse"`` or ``"fine"``.
        use_small: Whether to address the small checkpoint variant.

    Returns:
        Path under ``CACHE_DIR`` where the checkpoint is (or will be) stored.
    """
    key = model_type
    if use_small:
        key += "_small"
    # NOTE(review): the obfuscated original joined an ambiguous name here;
    # joining the cache directory matches how _load_model downloads into
    # CACHE_DIR — confirm against upstream.
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])
def _download(from_hf_hub, file_name):
    """Download ``file_name`` from the ``from_hf_hub`` repo into ``CACHE_DIR``.

    Args:
        from_hf_hub: Hub repo id (e.g. ``"suno/bark"``).
        file_name: Checkpoint file name inside the repo.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load an original Bark checkpoint into the matching HF sub-model.

    Args:
        ckpt_path: Local checkpoint path; downloaded from the Hub if absent.
        device: Device the converted model is moved to.
        use_small: Whether this is the small checkpoint variant.
        model_type: One of ``"text"``, ``"coarse"`` or ``"fine"``.

    Returns:
        The converted HF model in eval mode on ``device``.

    Raises:
        NotImplementedError: For an unknown ``model_type``.
        ValueError: If the checkpoint and model state dicts disagree on keys.
    """
    # Pick the HF model/config/generation-config classes for this sub-model.
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: old checkpoints carry a single vocab_size; the HF config
    # expects separate input/output vocab sizes.
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')
    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint: strip torch.compile's wrapper prefix and translate
    # layer names to the HF implementation.
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False: only the tolerated `.attn.bias` buffers may be absent.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model and verify it against the original.

    Loads the original suno/bark checkpoint, converts it to the HF
    implementation, checks parameter counts and output closeness on random
    inputs, then saves the converted model to ``pytorch_dump_folder_path``.

    Raises:
        NotImplementedError: For an unknown ``model_type``.
        ValueError: If parameter counts, output shapes or output values of
            the original and converted models disagree.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small)
    if model_type == "text":
        # the original loader wraps the text model in a dict
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_56, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_56, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble a full ``BarkModel`` from converted sub-models and save it.

    Args:
        semantic_path: Folder with the converted semantic (text) sub-model.
        coarse_path: Folder with the converted coarse acoustics sub-model.
        fine_path: Folder with the converted fine acoustics sub-model.
        append_text: Suffix appended to ``folder_path`` for the output dir.
        hub_path: Hub repo id the assembled model is pushed to.
        folder_path: Base output directory.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codec_config = EncodecConfig.from_pretrained('facebook/encodec_24khz')
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarse_acoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fine_acoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_config, fine_config, codec_config
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarse_acoustic.generation_config, fine_acoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarse_acoustic
    bark.fine_acoustics = fine_acoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # CLI entry point: convert a single Bark sub-model checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('model_type', type=str, help='text, coarse or fine.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 702 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture (no BOS token) used to instantiate test tokenizers.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the Pegasus SentencePiece tokenizer."""

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        # Real pretrained tokenizer, fetched once per test class.
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree, including on unknown/mask/pad tokens.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the BigBird-Pegasus variant of the Pegasus tokenizer
    (offset 0, no sentence mask token, '[MASK]' as mask token)."""

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on this variant's special tokens.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        # Token ids must match the original TF implementation's output.
        test_string = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1], )
| 646 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.