from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed of three 3x3 convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
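A short usage sketch for the classification model above. It assumes `transformers` and `tensorflow` are installed; the random tensor is only a stand-in for properly preprocessed pixel values, and the checkpoint name is the one referenced by the docstring constants.

import tensorflow as tf
from transformers import TFRegNetForImageClassification

model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# The model expects NCHW input, matching the input_signature above.
pixel_values = tf.random.uniform((1, 3, 224, 224))
logits = model(pixel_values).logits
print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])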
"""
Print all the Catalan numbers from 0 to n, n being the user input.
"""


def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan number sequence from C(0) through C(upper_limit).

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
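A quick cross-check of the DP routine above against the closed form C(n) = binom(2n, n) / (n + 1), which must agree at every index.

import math

dp = catalan_numbers(10)
for i, value in enumerate(dp):
    assert value == math.comb(2 * i, i) // (i + 1)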
"""Breadth-first search can be used to find the shortest path from a given
source node to a target node in an unweighted graph.
"""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find shortest path between `start` and `goal` nodes.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find shortest path distance (number of edges) between `start` and
    `target` nodes; returns -1 if no path exists.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
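A sanity check tying the two functions above together: the reported distance must equal the number of edges on the returned path.

path = bfs_shortest_path(demo_graph, "G", "D")
distance = bfs_shortest_path_distance(demo_graph, "G", "D")
assert path == ["G", "C", "A", "B", "D"]
assert distance == len(path) - 1 == 4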
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    split = s.rsplit(old, occurrence)
    return new.join(split)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
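A sketch of calling the converter above programmatically instead of via the CLI. The local checkpoint path is a placeholder, and the `dall_e` package must be installed since the function imports it.

hf_state_dict = convert_dalle_checkpoint(
    checkpoint_path="encoder.pkl",  # placeholder path to the original DALL-E encoder weights
    pytorch_dump_folder_path=None,
    config_path=None,
    save_checkpoint=False,  # return the upgraded state dict instead of writing to disk
)
print(f"{len(hf_state_dict)} tensors converted")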
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
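A minimal sketch of the behavior the @slow test above exercises, assuming network access to the Hugging Face Hub: BioGPT's tokenizer prepends token id 2 to every encoded sequence.

from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
ids = tokenizer("sequence builders")["input_ids"]
assert ids[0] == 2  # matches the build_inputs_with_special_tokens assertions above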
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
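How the lazy structure above is consumed in practice, assuming torch is installed: attribute access on `transformers` triggers the actual import of the modeling module through `_LazyModule`.

from transformers import LiltConfig, LiltModel

config = LiltConfig()      # default configuration
model = LiltModel(config)  # resolved lazily on first access
print(model.config.model_type)  # "lilt"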
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""

import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
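A sketch of driving the converter from Python rather than the CLI; `naver-clova-ix/donut-base` is one of the names the prompt branch above explicitly supports, the output directory is a placeholder, and the original `donut` package must be importable.

convert_donut_checkpoint(
    model_name="naver-clova-ix/donut-base",
    pytorch_dump_folder_path="./donut-base-hf",  # placeholder output directory
    push_to_hub=False,
)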
"""
Project Euler Problem 37: https://projecteuler.net/problem=37

The number 3797 has an interesting property. Being prime itself, it is possible
to continuously remove digits from left to right, and remain prime at each stage:
3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.

Find the sum of the only eleven primes that are both truncatable from left to
right and right to left.

NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
"""

from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Determine whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return a list of all left and right truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Rule out numbers above 1000 whose first or last three digits are not prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that are truncatable from both sides."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
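A spot check using 3797, the truncatable prime from the problem statement above: every left and right truncation must itself be prime, and it must appear among the first eleven.

assert all(is_prime(n) for n in list_truncated_nums(3797))
assert 3797 in compute_truncated_primes(11)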
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""

        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
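A standalone check of the MaskGenerator defaults used above, assuming numpy and torch are installed: with a 192px input, 32px mask patches, and 4px model patches, the flattened mask covers (192/4)^2 = 2304 positions, and ceil(36 * 0.6) = 22 of the 36 mask patches are set, each expanding to an 8x8 block.

gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
assert mask.shape == (2304,)
assert int(mask.sum()) == 22 * 8 * 8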
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
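A sketch of the public entry point these tests exercise indirectly; it assumes a `datasets` release with Spark support and a working local Spark session.

import pyspark
from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.range(10)
ds = Dataset.from_spark(df)
print(ds[0])  # {"id": 0}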
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
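
# Usage sketch (illustrative, not from the original file; class names follow the
# rewrite above, and LevitOnnxConfig is assumed to take a model config like other
# OnnxConfig subclasses):
#
#   config = LevitConfig()
#   onnx_config = LevitOnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict describing pixel_values axes
#   print(onnx_config.atol_for_validation)  # 1e-4, tolerance for ONNX validation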
| 360 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact from its URL (which redirects to the actual download URL)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error, keeping the failed tests in which it appeared."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowerCamelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase : Optional[Any] = k.find(" / ")
_lowerCamelCase : Tuple = k[index + len(" / ") :]
_lowerCamelCase : List[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : str = reduce_by_error(errors)
_lowerCamelCase : Tuple = reduce_by_model(errors)
_lowerCamelCase : List[str] = make_github_table(reduced_by_error)
_lowerCamelCase : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
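
# Example invocation (illustrative; the script name, run id and token are placeholders):
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_reports \
#       --token $GITHUB_TOKEN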
| 159 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str]=0 ) -> str:
return sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[column] )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=float('''inf''' ) ) -> Any:
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
__a = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__a = current_dis
return min_dis
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=float('''inf''' ) ) -> Union[str, Any]:
for i in range(min(6 , points_counts - 1 ) , lowerCAmelCase__ ):
for j in range(max(0 , i - 6 ) , lowerCAmelCase__ ):
__a = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__a = current_dis
return min_dis
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] ) -> List[str]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase__ , lowerCAmelCase__ )
# recursion
__a = points_counts // 2
__a = closest_pair_of_points_sqr(
lowerCAmelCase__ , points_sorted_on_y[:mid] , lowerCAmelCase__ )
__a = closest_pair_of_points_sqr(
lowerCAmelCase__ , points_sorted_on_y[mid:] , points_counts - mid )
__a = min(lowerCAmelCase__ , lowerCAmelCase__ )
__a = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase__ )
__a = dis_between_closest_in_strip(
lowerCAmelCase__ , len(lowerCAmelCase__ ) , lowerCAmelCase__ )
return min(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
__a = column_based_sort(lowerCAmelCase__ , column=0 )
__a = column_based_sort(lowerCAmelCase__ , column=1 )
return (
closest_pair_of_points_sqr(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
) ** 0.5
if __name__ == "__main__":
lowercase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
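
# Illustrative note (an assumption about intent, matching transformers' standard
# lazy-init pattern): with _LazyModule installed in sys.modules, importing
# UniSpeechModel from this package only triggers the torch-dependent import when
# the attribute is first accessed, keeping the top-level package import cheap.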
| 45 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
if set(__SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__lowerCAmelCase: Tuple = MLukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , task="entity_classification" )
__lowerCAmelCase: Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__lowerCAmelCase: Optional[Any] = (0, 9)
__lowerCAmelCase: Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
__lowerCAmelCase: int = model(**__SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCAmelCase: Dict = torch.Size((1, 3_3, 7_6_8) )
__lowerCAmelCase: Optional[int] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCAmelCase: Union[str, Any] = torch.Size((1, 1, 7_6_8) )
__lowerCAmelCase: Any = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__lowerCAmelCase: Tuple = MLukeTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = "Tokyo is the capital of <mask>."
__lowerCAmelCase: List[str] = (2_4, 3_0)
__lowerCAmelCase: int = tokenizer(__SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="pt" )
__lowerCAmelCase: Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = encoding["input_ids"][0].tolist()
__lowerCAmelCase: int = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__lowerCAmelCase: Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = outputs.entity_logits[0][0].argmax().item()
__lowerCAmelCase: Union[str, Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__SCREAMING_SNAKE_CASE ) )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
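
# Example invocation (the script name and paths below are placeholders, not from the
# original file):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path converted_mluke_base \
#       --model_size base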
| 357 |
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def a__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
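
# Note (added commentary): for inputs restricted to 0/1, the tuple-count trick above
# is equivalent to the bitwise expression `input_1 & input_2`.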
| 108 | 0 |
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> bool:
lowerCAmelCase__ : Union[str, Any] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase_ ( __UpperCAmelCase = 5000 ) -> int:
lowerCAmelCase__ : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , __UpperCAmelCase )]
for i, pentagonal_i in enumerate(__UpperCAmelCase ):
for j in range(__UpperCAmelCase , len(__UpperCAmelCase ) ):
lowerCAmelCase__ : List[Any] = pentagonal_nums[j]
lowerCAmelCase__ : List[str] = pentagonal_i + pentagonal_j
lowerCAmelCase__ : Optional[int] = pentagonal_j - pentagonal_i
if is_pentagonal(__UpperCAmelCase ) and is_pentagonal(__UpperCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242 |
"""simple docstring"""
from typing import Any
import numpy as np
def lowercase_ ( __UpperCAmelCase ) -> bool:
return np.array_equal(__UpperCAmelCase , matrix.conjugate().T )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Optional[int] = v.conjugate().T
lowerCAmelCase__ : Optional[int] = v_star.dot(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , np.ndarray )
return (v_star_dot.dot(__UpperCAmelCase )) / (v_star.dot(__UpperCAmelCase ))
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Union[str, Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCAmelCase__ : List[str] = np.array([[1], [2], [3]] )
assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian."""
print(rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian."""
assert rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
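
# Note (added commentary): for a Hermitian matrix A, the Rayleigh quotient
# R(A, v) = (v* A v) / (v* v) is always real and bounded by the extreme eigenvalues,
# lambda_min <= R(A, v) <= lambda_max, which is why the second test can compare the
# result against a plain float.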
| 242 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
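
# Usage sketch (illustrative, not from the original file; it mirrors the documented
# DPR reader workflow, with the model forward pass left as an assumption):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway",
#       return_tensors="pt",
#   )
#   # outputs = model(**encoded)                            # a DPRReader forward pass
#   # spans = tokenizer.decode_best_spans(encoded, outputs)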
| 6 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root exists in [a, b] only if f(a) and f(b) have opposite signs.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
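
# Note (added commentary): each iteration halves the bracket, so reaching the 0.01
# tolerance from an interval of width w takes about ceil(log2(w / 0.01)) iterations;
# for bisection(-2, 5), w = 7, that is 10 iterations.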
| 19 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_="wiki40b" , lowerCamelCase_="dense" , lowerCamelCase_=10 ) -> Dict:
if source == "none":
_lowercase , _lowercase : Union[str, Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_lowercase , _lowercase : Dict = query_qa_dense_index(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
_lowercase , _lowercase : str = query_es_index(
lowerCamelCase_ , lowerCamelCase_ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase_ , )
_lowercase : List[Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_lowercase : Union[str, Any] = 'question: {} context: {}'.format(lowerCamelCase_ , lowerCamelCase_ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is the module-level variable set by make_support before this is called.
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Any = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[int] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[int] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : int = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : int = "wiki40b"
SCREAMING_SNAKE_CASE : int = "dense"
SCREAMING_SNAKE_CASE : str = "beam"
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : List[str] = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = 256
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : Any = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : int = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : int = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : str = None
# start main text
SCREAMING_SNAKE_CASE : List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : str = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : List[str] = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Optional[int] = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : Optional[Any] = support_list[:10]
SCREAMING_SNAKE_CASE : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Any = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
nn_train_list = find_nearest_training(question)
train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 21 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer, built on a byte-level BPE."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
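# A minimal usage sketch (requires network access to fetch the checkpoint; token ids are illustrative):
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])  # bos + seq0 + eos + seq1 + eos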
| 41 |
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence, defined by
    a(1) = 2 and a(n) = a(n - 1) * (a(n - 1) - 1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
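# Sanity check against the recurrence above:
# sylvester(1) == 2, sylvester(2) == 3, sylvester(3) == 7, sylvester(4) == 43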
if __name__ == "__main__":
print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 41 | 1 |
"""Project Euler problem 39: find the perimeter p <= 1000 that admits the largest
number of right triangles with integer side lengths."""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to `max_perimeter`, the right triangles with integer side lengths."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter (at most `max_perimeter`) with the most Pythagorean triples."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
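# Note: the O(n^2) double loop with a float square-root check is fine for n = 1000; for much
# larger limits, Euclid's formula (a = m^2 - n^2, b = 2mn, c = m^2 + n^2) generates triples directly.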
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""") | 97 |
"""CamemBERT model configuration."""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class for CamemBERT models (a RoBERTa-style architecture)."""
    model_type = "camembert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 97 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention; does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer; does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameter of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
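# Note: T5X stores dense kernels as (in_features, out_features), while torch.nn.Linear
# weights are (out_features, in_features) — hence the .T transposes applied below.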
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to Transformers-PyTorch names."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the model's parameters with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
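# Example invocation (paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir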
| 238 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 238 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
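# The writer tests below round-trip the fixture database through SqlDatasetWriter
# and compare the resulting rows one by one against the source database.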
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 63 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} }
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score} | 126 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
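# e.g. arg_to_scheduler_metavar renders as "{cosine, cosine_w_restarts, linear, polynomial}" in --help output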
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)
    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")
    def train_dataloader(self):
        return self.train_loader
    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
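# A minimal driver sketch (MyTaskTransformer is a hypothetical BaseTransformer subclass
# implementing get_dataloader and the training/validation steps):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)
#   trainer = generic_train(model, args)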
| 351 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf
    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
if is_vision_available():
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
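# A minimal standalone sketch of the label-masking convention exercised above:
# positions set to -100 are ignored by the Hugging Face loss computations, so
# masking all but one position leaves a loss computed from that position alone.
# (Self-contained demo array; not part of the test suite.)
import numpy as np

demo_labels = np.array([[5, 2, 7, 1]])
masked = demo_labels.copy()
masked[:, 1:] = -100  # only the first token still contributes to the loss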
| 13 | 0 |
"""simple docstring"""
import cv2 as cva  # OpenCV, imported under the alias used throughout this snippet
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        # k is an empirically determined sensitivity constant, usually 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the corner response; can be tuned per image
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
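# A quick self-contained check of the detector above (assumptions: OpenCV
# installed and a writable working directory; the synthetic image and file
# names are made up for this demo). Corners of a bright square should yield
# strong positive Harris responses.
import numpy as np
import cv2 as cva

synthetic = np.zeros((64, 64), dtype=np.uint8)
synthetic[16:48, 16:48] = 255  # white square on black background
cva.imwrite('synthetic.png', synthetic)
detector = HarrisCorner(0.04, 3)
annotated, corners = detector.detect('synthetic.png')
print(f'{len(corners)} corner candidates found')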
| 290 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
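# A minimal usage sketch for the processor above. Assumptions: transformers
# installed with hub access, and the public "google/owlvit-base-patch32"
# checkpoint; the blank test image is made up purely for illustration.
import numpy as np
from PIL import Image

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)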
| 316 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
__lowerCAmelCase = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
__lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
__lowerCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
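# A minimal in-memory sketch of the same grouping trick (made-up word list;
# no words.txt needed): words sharing a sorted-letter signature are anagrams.
import collections

demo_words = ["listen", "silent", "enlist", "google", "banana"]
demo_by_signature = collections.defaultdict(list)
for demo_word in demo_words:
    demo_by_signature[signature(demo_word)].append(demo_word)
print(demo_by_signature[signature("listen")])  # ['listen', 'silent', 'enlist']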
| 357 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # that index marks the start of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
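# Quick sanity checks for the functions above (values verified by hand):
# the prefix "abra" recurs at index 7 of "abracadabra", giving Z-value 4,
# and "abr" occurs twice in the text.
assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2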
| 288 | 0 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the normal probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
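# Sanity check (standard normal): the density peaks at x = mu with value
# 1 / sqrt(2 * pi), approximately 0.3989.
import math

assert abs(gaussian(0) - 1 / math.sqrt(2 * math.pi)) < 1e-12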
| 219 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Maps each choice to its string form so argparse can convert it back."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
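# A minimal usage sketch for the parser above. The dataclass and argv values
# are made up for illustration; note how the boolean field also receives the
# generated --no_do_eval complement flag described in _parse_dataclass_field.
@dataclasses.dataclass
class DemoArgs:
    learning_rate: float = 5e-5
    do_eval: bool = True
    run_name: Optional[str] = None


demo_parser = HfArgumentParser(DemoArgs)
(demo_args,) = demo_parser.parse_args_into_dataclasses(
    args=["--learning_rate", "3e-5", "--no_do_eval"]
)
print(demo_args.learning_rate, demo_args.do_eval)  # 3e-05 False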
| 219 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
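# The slice-comparison idiom used in every test above, shown standalone on
# synthetic data: sample a few pixels from a fixed location and compare them
# to a recorded reference within a tolerance (the values here are made up).
import numpy as np

demo_image = np.random.RandomState(0).rand(1, 128, 128, 3)
demo_slice = demo_image[0, -3:, -3:, -1].flatten()  # nine corner pixels
demo_reference = demo_slice.copy()  # a real test hard-codes this array
assert np.abs(demo_slice - demo_reference).max() < 1e-1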
| 368 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
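# Example usage (assumes nltk and its punkt model are available): splitting a
# two-sentence string yields one sentence per line.
if NLTK_AVAILABLE:
    print(add_newline_to_end_of_each_sentence("Hello world. How are you?"))
    # Hello world.
    # How are you?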
| 225 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 131 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
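# Quick check: the first Fibonacci number with three digits is 144, which is
# the 12th term of the sequence, so solution(3) should return 12.
assert solution(3) == 12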
| 159 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
_lowerCamelCase : int = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
return F"""{self.framework}-transfromers-test"""
@property
    def test_path(self):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
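# The property being tested, shown standalone: pickling an optimizer means it
# must round-trip through dumps/loads with its configuration intact (plain
# PyTorch SGD here, without the Accelerate wrapper).
import pickle

import torch

plain_optimizer = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), 0.1)
restored = pickle.loads(pickle.dumps(plain_optimizer))
assert restored.state_dict()["param_groups"][0]["lr"] == 0.1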
| 337 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
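# A minimal sketch of the lazy-import idea that _LazyModule implements, using
# PEP 562 module-level __getattr__ instead; the attribute-to-module mapping
# below is a made-up demo, not the transformers implementation.
import importlib

_DEMO_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}


def __getattr__(name):
    if name in _DEMO_LAZY_ATTRS:
        # import the backing module only when the attribute is first touched
        return getattr(importlib.import_module(_DEMO_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")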
| 10 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 108 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    """K-Means clustering written against TensorFlow's 1.x graph/session API."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
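# A minimal usage sketch (assumptions: a TensorFlow build exposing the 1.x
# graph/session API used above; the toy 2-D points are made up). Two
# well-separated blobs should come back as two clusters.
import numpy as np

demo_points = np.concatenate(
    [np.random.randn(20, 2) + [5.0, 5.0], np.random.randn(20, 2) - [5.0, 5.0]]
).astype("float64")
demo_centroids, demo_assignments = tf_k_means_cluster(list(demo_points), 2)
print(demo_centroids)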
| 351 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24,
                 num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
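# Usage note (added commentary, not part of the original module): with this lazy
# structure, importing the package stays cheap; a heavy framework-specific class
# is only loaded when the attribute is first accessed, e.g.:
#
#     from transformers.models.blenderbot import BlenderbotConfig  # no torch/tf/flax import yet
#
# `_LazyModule` looks the name up in `_import_structure` and imports the matching
# submodule on that first attribute access.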
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCAmelCase = """
Human: <<task>>
Assistant: """
UpperCAmelCase = """huggingface-tools/default-prompts"""
UpperCAmelCase = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
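
# Minimal usage sketch (illustrative; the agent name is a placeholder):
#
#     template = download_prompt(None, agent_name="my-agent", mode="run")
#     # Falls back to the default dataset repo and returns the contents of
#     # run_prompt_template.txt; a string containing whitespace would instead
#     # be treated as the prompt itself and returned unchanged.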
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
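
# Usage sketch (illustrative) mirroring what the tests above exercise:
#
#     resolved = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
#     # -> local path under TRANSFORMERS_CACHE/models--.../snapshots/<commit>/config.json
#     maybe_missing = get_file_from_repo("bert-base-cased", "missing.txt")
#     # -> None rather than raising, when only the file (not the repo) is absent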
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    # write the current term as a(i) = b * 10^k + c;
    # ds_b is digitsum(b), c is the value of the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    # computes terms of the sequence one at a time, updating a_i in place
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    # digits holds the current term in little-endian digit order, starting from 1
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'{solution() = }')
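
# Cross-check sketch (added; assumes this implementation's indexing, where the
# sequence starts at a(1) = 1 and a(i+1) = a(i) + digit_sum(a(i))):
#
#     def brute_force(n: int) -> int:
#         a = 1
#         for _ in range(n - 1):
#             a += sum(int(d) for d in str(a))
#         return a
#
# For small n (e.g. n = 20), brute_force(n) should agree with solution(n).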
"""simple docstring"""
class Node:
    # BST node: values smaller than `val` go left, larger go right.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    # stack the per-example image tensors into a single batch tensor
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        # apply the train-time transforms to every image in the batch
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
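
# Example invocation (illustrative; every flag value below is a placeholder):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss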
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two disjoint sets using the union-by-rank heuristic.
        Return True if a merge happened, False if they already share a root.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
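

if __name__ == "__main__":
    # Small demonstration (added; not part of the original snippet): six
    # singleton sets; merging grows the tracked maximum set size.
    disjoint_set = DisjointSet([1, 1, 1, 1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(1, 2)
    print(disjoint_set.get_parent(0) == disjoint_set.get_parent(2))  # True: same root
    print(disjoint_set.max_set)  # 3: the merged set now holds three items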
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """
    Count the number of reversible numbers of the given length by filling
    digit pairs from the outside in, tracking the carry in `remainder`.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result
def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def snake_case__ ( self : str ):
'''simple docstring'''
import torch
__UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase : Union[str, Any] = np.random.rand(1_00 , 32 ).astype(np.floataa )
__UpperCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase : str = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCAmelCase : List[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _load_datasamples( self , num_samples ):
'''simple docstring'''
ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_integration( self ):
'''simple docstring'''
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
audio = self._load_datasamples(1 )[0]
audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
| 226 |
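The block above is test code for `WhisperFeatureExtractor`. As a minimal, hedged usage sketch (assuming `transformers` and `numpy` are installed; the silence input is purely illustrative), the extractor pads or truncates audio to 30 seconds and returns an 80-bin log-mel spectrogram:

```python
import numpy as np
from transformers import WhisperFeatureExtractor

# one second of silence at Whisper's expected 16 kHz sampling rate
waveform = np.zeros(16_000, dtype=np.float32)

feature_extractor = WhisperFeatureExtractor()
features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features

# inputs are padded/truncated to 30 s of audio, giving (batch, n_mels, frames)
print(features.shape)  # (1, 80, 3000)
```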
class Node:
"""simple docstring"""
def __init__( self , name , val ):
self.name = name
self.val = val
def __str__( self ):
return F"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self , other ):
return self.val < other.val
class MinHeap:
"""simple docstring"""
def __init__( self , array ):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array )
def __getitem__( self , key ):
return self.get_value(key )
def get_parent_idx( self , idx ):
return (idx - 1) // 2
def get_left_child_idx( self , idx ):
return idx * 2 + 1
def get_right_child_idx( self , idx ):
return idx * 2 + 2
def get_value( self , key ):
return self.heap_dict[key]
def build_heap( self , array ):
last_idx = len(array ) - 1
start_from = self.get_parent_idx(last_idx )
for idx, i in enumerate(array ):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
for i in range(start_from , -1 , -1 ):
self.sift_down(i , array )
return array
def sift_down( self , idx , array ):
while True:
l = self.get_left_child_idx(idx ) # noqa: E741
r = self.get_right_child_idx(idx )
smallest = idx
if l < len(array ) and array[l] < array[idx]:
smallest = l
if r < len(array ) and array[r] < array[smallest]:
smallest = r
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up( self , idx ):
p = self.get_parent_idx(idx )
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(idx )
def peek( self ):
return self.heap[0]
def remove( self ):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def insert( self , node ):
self.heap.append(node )
self.idx_of_element[node] = len(self.heap ) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap ) - 1 )
def is_empty( self ):
return len(self.heap ) == 0
def decrease_key( self , node , new_value ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less than current value"
node.val = new_value
self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node] )
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
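A small sanity check for the heap above (illustrative only, reusing the `Node`/`MinHeap` classes just defined): `decrease_key` locates a node's slot through the `idx_of_element` map in O(1) and then sifts it up, so lowering a value never needs a linear scan of the heap:

```python
nodes = [Node("A", 5), Node("B", 9), Node("C", 2)]
heap = MinHeap(nodes)

assert heap.peek().name == "C"      # smallest value sits at the root
heap.decrease_key(nodes[1], -3)     # B: 9 -> -3, bubbles up to the root
assert heap.peek().name == "B"
assert heap.remove().val == -3      # pop re-establishes the heap property
```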
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_type: str = field(default="Audio" , init=False , repr=False )
def __call__( self ):
return self.pa_type
def encode_example( self , value: Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(value , str):
return {"bytes": None, "path": value}
elif isinstance(value , bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already have the PCM bytes, we don't have to read the file again (just use them!)
bytes_value = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer , bytes_value , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def decode_example( self , value: dict , token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
path, file = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split('''::''')[-1]
try:
repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)['''repo_id''']
use_auth_token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
use_auth_token = None
with xopen(path , '''rb''' , use_auth_token=use_auth_token) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def cast_storage( self , storage: Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage) , type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage) , type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
bytes_array = storage.field('''bytes''')
else:
bytes_array = pa.array([None] * len(storage) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
path_array = storage.field('''path''')
else:
path_array = pa.array([None] * len(storage) , type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(storage , self.pa_type)
def embed_storage( self , storage: pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(path ):
with xopen(path , '''rb''') as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(storage , self.pa_type)
| 344 |
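A hedged round-trip sketch for the `Audio` feature above (assuming `datasets`, `numpy`, and `soundfile` are available; the sine wave is illustrative): `encode_example` serializes an in-memory array to WAV bytes, and `decode_example` reads it back, resampling when a target `sampling_rate` is set:

```python
import numpy as np
from datasets import Audio

# a 1-second 440 Hz tone sampled at 16 kHz
sine = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000)).astype(np.float32)

feature = Audio(sampling_rate=8_000)  # decoding will resample 16 kHz -> 8 kHz
encoded = feature.encode_example({"array": sine, "sampling_rate": 16_000})
assert encoded["bytes"] is not None and encoded["path"] is None

decoded = feature.decode_example(encoded)
print(decoded["sampling_rate"], decoded["array"].shape)  # 8000 (8000,)
```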
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig( PretrainedConfig ):
model_type = "open-llama"
def __init__( self , vocab_size=100000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
# the misspelled legacy kwarg name is accepted for backward compatibility
self.use_memory_efficient_attention = kwargs.pop(
'''use_memorry_efficient_attention''' , use_memory_efficient_attention)
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.use_stable_embedding = use_stable_embedding
self.shared_input_output_embedding = shared_input_output_embedding
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def _rope_scaling_validation( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}")
rope_scaling_type = self.rope_scaling.get('''type''' , None)
rope_scaling_factor = self.rope_scaling.get('''factor''' , None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 344 | 1 |
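To illustrate the validation path above (a sketch; assumes a `transformers` version that still ships OpenLlama), a well-formed `rope_scaling` dict passes while a malformed one raises:

```python
from transformers import OpenLlamaConfig

# valid: "type" is one of {"linear", "dynamic"} and "factor" is a float > 1.0
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

try:
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # the factor field must be a float > 1
```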
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = MobileBertTokenizer
rust_tokenizer_class = MobileBertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
pre_trained_model_path = """google/mobilebert-uncased"""
def setUp( self ):
'''simple docstring'''
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def get_input_output_texts( self , tokenizer ):
'''simple docstring'''
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def test_full_tokenizer( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def test_rust_and_python_full_tokenizers( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = '''UNwant\u00E9d,running'''
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True )
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
sequence = '''UNwant\u00E9d,running'''
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
def test_chinese( self ):
'''simple docstring'''
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def test_basic_tokenizer_lower( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_lower_strip_accents_false( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def test_basic_tokenizer_lower_strip_accents_true( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_lower_strip_accents_default( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def test_basic_tokenizer_no_lower( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_no_lower_strip_accents_false( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_no_lower_strip_accents_true( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def test_basic_tokenizer_respects_never_split_tokens( self ):
'''simple docstring'''
tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def test_wordpiece_tokenizer( self ):
'''simple docstring'''
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def test_is_whitespace( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def test_is_control( self ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def test_is_punctuation( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def test_clean_text( self ):
'''simple docstring'''
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def test_sequence_builders( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def test_offsets_with_special_characters( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def test_change_tokenize_chinese_chars( self ):
'''simple docstring'''
list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
kwargs['''tokenize_chinese_chars'''] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
kwargs['''tokenize_chinese_chars'''] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 34 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack( list ):
def __lt__( self , other ):
return self[-1] < other[-1]
def __eq__( self , other ):
return self[-1] == other[-1]
def patience_sort( collection: list ) -> list:
stacks: list[Stack] = []
# sort into stacks
for element in collection:
new_stack = Stack([element] )
i = bisect_left(stacks , new_stack )
if i != len(stacks ):
stacks[i].append(element )
else:
stacks.append(new_stack )
# use a heap-based merge to merge stack efficiently
collection[:] = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 288 | 0 |
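A worked trace of the pile construction above (illustrative, not part of the original snippet): each element lands on the leftmost pile whose top is still >= it, piles stay sorted by their tops, and reversing each pile yields the sorted runs that `heapq.merge` combines:

```python
# patience_sort([5, 1, 4, 2]) step by step:
#   5 -> piles [[5]]
#   1 -> top of pile 0 is 5 >= 1:  [[5, 1]]
#   4 -> no pile top >= 4, new pile: [[5, 1], [4]]
#   2 -> top of pile 1 is 4 >= 2:  [[5, 1], [4, 2]]
# reversed piles are the sorted runs [1, 5] and [2, 4]
print(patience_sort([5, 1, 4, 2]))  # [1, 2, 4, 5]
```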
from math import pi
def arc_length( angle: int , radius: int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 328 |
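The demo call checks out by hand: an angle of 90 degrees sweeps a quarter of the circumference, so 2 * pi * 10 * (90 / 360) = 5 * pi, about 15.708:

```python
from math import pi

assert abs(arc_length(90, 10) - 5 * pi) < 1e-9  # quarter circle of radius 10
```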
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f" )
args = parser.parse_args()
return args.f
def get_results( output_dir , split="eval" ):
path = os.path.join(output_dir , F'''{split}_results.json''' )
if os.path.exists(path ):
with open(path , "r" ) as f:
return json.load(f )
raise ValueError(F'''can\'t find {path}''' )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests( TestCasePlus ):
def test_run_glue( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(sys , "argv" , testargs ):
run_flax_glue.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def test_run_clm( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(sys , "argv" , testargs ):
run_clm_flax.main()
result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def test_run_summarization( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(sys , "argv" , testargs ):
run_summarization_flax.main()
result = get_results(tmp_dir , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def test_run_mlm( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(sys , "argv" , testargs ):
run_mlm_flax.main()
result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def test_run_t5_mlm( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(sys , "argv" , testargs ):
run_t5_mlm_flax.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def test_run_ner( self ):
"""simple docstring"""
epochs = 7 if get_gpu_count() > 1 else 2
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(sys , "argv" , testargs ):
run_flax_ner.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def test_run_qa( self ):
"""simple docstring"""
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(sys , "argv" , testargs ):
run_qa.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 328 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
'''simple docstring'''
# note: assumes `nums` is sorted in ascending order
i = 0
j = len(nums ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
i = i + 1
else:
j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 79 |
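Two quick checks of the function above (illustrative): the scan converges because each step discards one endpoint that can no longer be part of a solution, which only holds when the input is sorted ascending:

```python
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]   # 2 + 7 == 9
assert two_pointer([2, 7, 11, 15], 100) == []     # no pair reaches 100
```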
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Optional[Any] ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 225 | 0 |
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def decimal_to_hexadecimal( decimal )-> str:
'''simple docstring'''
assert type(decimal ) in (int, float) and decimal == int(decimal )
decimal = int(decimal )
hexadecimal = ""
negative = False
if decimal < 0:
negative = True
decimal *= -1
while decimal > 0:
decimal, remainder = divmod(decimal , 16 )
hexadecimal = values[remainder] + hexadecimal
hexadecimal = "0x" + hexadecimal
if negative:
hexadecimal = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
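A few spot checks for the conversion above (illustrative): digits are prepended as the value is repeatedly divided by 16, and the sign is restored at the end. Note that an input of 0 skips the loop entirely and yields the bare prefix "0x":

```python
assert decimal_to_hexadecimal(255) == "0xff"
assert decimal_to_hexadecimal(-256) == "-0x100"
assert decimal_to_hexadecimal(0) == "0x"  # edge case: empty digit string
```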
"""simple docstring"""
def jaro_winkler( str_a: str , str_b: str )-> float:
'''simple docstring'''
def get_matched_characters(_str_a: str , _str_b: str ) -> str:
matched = []
limit = min(len(_str_a ) , len(_str_b ) ) // 2
for i, l in enumerate(_str_a ):
left = int(max(0 , i - limit ) )
right = int(min(i + limit + 1 , len(_str_b ) ) )
if l in _str_b[left:right]:
matched.append(l )
_str_b = F'''{_str_b[0:_str_b.index(l )]} {_str_b[_str_b.index(l ) + 1:]}'''
return "".join(matched )
# matching characters
matching_a = get_matched_characters(str_a , str_b )
matching_b = get_matched_characters(str_b , str_a )
match_count = len(matching_a )
# transposition
transpositions = (
len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
)
if not match_count:
jaro = 0.0
else:
jaro = (
1
/ 3
* (
match_count / len(str_a )
+ match_count / len(str_b )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
prefix_len = 0
for ca, cb in zip(str_a[:4] , str_b[:4] ):
if ca == cb:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 226 | 0 |
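In words, the score above is jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3, where m counts matched characters within the sliding window and t counts transpositions; the Winkler term then adds 0.1 * prefix_len * (1 - jaro) for a shared prefix of up to four characters. Two hedged spot checks:

```python
assert jaro_winkler("hello", "hello") == 1.0        # identical strings score 1
assert 0.0 <= jaro_winkler("hello", "world") <= 1.0
```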
'''simple docstring'''
import math
def check_partition_perfect( positive_integer: int ) -> bool:
exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def solution( max_proportion: float = 1 / 12345 ) -> int:
total_partitions = 0
perfect_partitions = 0
integer = 3
while True:
partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
partition_candidate = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }")
| 276 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device( module , tensor_name , device , value=None , fp16_statistics=None ):
# Recurse if needed
if "." in tensor_name:
splits = tensor_name.split(""".""" )
for split in splits[:-1]:
new_module = getattr(module , split )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
module = new_module
tensor_name = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
is_buffer = tensor_name in module._buffers
old_value = getattr(module , tensor_name )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
is_4bit = False
is_8bit = False
if is_buffer or not is_bitsandbytes_available():
is_8bit = False
is_4bit = False
else:
is_4bit = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_8bit or is_4bit:
param = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
new_value = old_value.to(device )
elif isinstance(value , torch.Tensor ):
new_value = value.to("""cpu""" )
if value.dtype == torch.int8:
is_8bit_serializable = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_8bit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
new_value = torch.tensor(value , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
new_value = new_value.T
kwargs = old_value.__dict__
if is_8bit:
new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
elif is_4bit:
new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
module._parameters[tensor_name] = new_value
if fp16_statistics is not None:
setattr(module.weight , """SCB""" , fp16_statistics.to(device ) )
else:
if value is None:
new_value = old_value.to(device )
elif isinstance(value , torch.Tensor ):
new_value = value.to(device )
else:
new_value = torch.tensor(value , device=device )
if is_buffer:
module._buffers[tensor_name] = new_value
else:
new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name )
if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(current_key_name ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(module , Conv1D ):
in_features, out_features = module.weight.shape
else:
in_features = module.in_features
out_features = module.out_features
if quantization_config.quantization_method() == "llm_int8":
model._modules[name] = bnb.nn.Linear8bitLt(
in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
has_been_replaced = True
else:
if (
quantization_config.llm_int8_skip_modules is not None
and name in quantization_config.llm_int8_skip_modules
):
pass
else:
model._modules[name] = bnb.nn.Linear4bit(
in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
has_been_replaced = True
# Store the module class in case we need to transpose the weight later
model._modules[name].source_cls = type(module )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False )
if len(list(module.children() ) ) > 0:
_, has_been_replaced = _replace_with_bnb_linear(
module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None ):
modules_to_not_convert = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
model, has_been_replaced = _replace_with_bnb_linear(
model , modules_to_not_convert , current_key_name , quantization_config )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def replace_8bit_linear( *args , **kwargs ):
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , FutureWarning , )
return replace_with_bnb_linear(*args , **kwargs )
def set_module_8bit_tensor_to_device( *args , **kwargs ):
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , FutureWarning , )
return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert( model ):
tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager
tied_model.tie_weights()
tied_params = find_tied_parameters(tied_model )
# For compatibility with Accelerate < 0.18
if isinstance(tied_params , dict ):
tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
tied_keys = sum(tied_params , [] )
has_tied_params = len(tied_keys ) > 0
# Check if it is a base model
is_base_model = not hasattr(model , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
list_modules = list(model.named_children() )
list_last_module = [list_modules[-1][0]]
# add last module together with tied weights
intersection = set(list_last_module ) - set(tied_keys )
list_untouched = list(set(tied_keys ) ) + list(intersection )
# remove ".weight" from the keys
names_to_remove = [""".weight""", """.bias"""]
filtered_module_names = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
name = name.replace(name_to_remove , """""" )
filtered_module_names.append(name )
return filtered_module_names
| 276 | 1 |
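A hedged end-to-end sketch of how the helpers above are normally driven (the checkpoint name is an illustrative choice; in practice `from_pretrained` with a quantization config calls these utilities internally):

```python
# requires a CUDA machine with `bitsandbytes` and `accelerate` installed
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint
    quantization_config=quant_config,
    device_map="auto",
)
# under the hood this walks the module tree with `_replace_with_bnb_linear`,
# skipping the modules returned by `get_keys_to_not_convert` (e.g. the lm_head)
```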
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
"""simple docstring"""
def __init__( self , in_channels , out_channels , kernel_size=3 , stride=1 , groups=1 , activation="relu" , ):
super().__init__()
self.convolution = nn.Conv2d(
in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
self.normalization = nn.BatchNorm2d(out_channels )
self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
def forward( self , hidden_state ):
hidden_state = self.convolution(hidden_state )
hidden_state = self.normalization(hidden_state )
hidden_state = self.activation(hidden_state )
return hidden_state
class RegNetEmbeddings( nn.Module ):
"""simple docstring"""
def __init__( self , config ):
super().__init__()
self.embedder = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
self.num_channels = config.num_channels
def forward( self , pixel_values ):
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
hidden_state = self.embedder(pixel_values )
return hidden_state
class RegNetShortCut( nn.Module ):
"""simple docstring"""
def __init__( self , in_channels , out_channels , stride=2 ):
super().__init__()
self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
self.normalization = nn.BatchNorm2d(out_channels )
def forward( self , input ):
hidden_state = self.convolution(input )
hidden_state = self.normalization(hidden_state )
return hidden_state
class RegNetSELayer( nn.Module ):
"""simple docstring"""
def __init__( self , in_channels , reduced_channels ):
super().__init__()
self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
self.attention = nn.Sequential(
nn.Conv2d(in_channels , reduced_channels , kernel_size=1) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1) , nn.Sigmoid() , )
def forward( self , hidden_state ):
pooled = self.pooler(hidden_state )
attention = self.attention(pooled )
hidden_state = hidden_state * attention
return hidden_state
class RegNetXLayer( nn.Module ):
"""simple docstring"""
def __init__( self , config , in_channels , out_channels , stride=1 ):
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1 , out_channels // config.groups_width )
self.shortcut = (
RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
)
self.layer = nn.Sequential(
RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
self.activation = ACT2FN[config.hidden_act]
def forward( self , hidden_state ):
residual = hidden_state
hidden_state = self.layer(hidden_state )
residual = self.shortcut(residual )
hidden_state += residual
hidden_state = self.activation(hidden_state )
return hidden_state
class RegNetYLayer( nn.Module ):
"""simple docstring"""
def __init__( self , config , in_channels , out_channels , stride=1 ):
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1 , out_channels // config.groups_width )
self.shortcut = (
RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
)
self.layer = nn.Sequential(
RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
self.activation = ACT2FN[config.hidden_act]
def forward( self , hidden_state ):
residual = hidden_state
hidden_state = self.layer(hidden_state )
residual = self.shortcut(residual )
hidden_state += residual
hidden_state = self.activation(hidden_state )
return hidden_state
class RegNetStage( nn.Module ):
"""simple docstring"""
def __init__( self , config , in_channels , out_channels , stride=2 , depth=2 , ):
super().__init__()
layer = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
self.layers = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )
def forward( self , hidden_state ):
hidden_state = self.layers(hidden_state )
return hidden_state
class RegNetEncoder( nn.Module ):
"""simple docstring"""
def __init__( self , config ):
super().__init__()
self.stages = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
def forward( self , hidden_state , output_hidden_states=False , return_dict=True ):
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state )
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class __lowerCamelCase ( UpperCAmelCase__ ):
"""simple docstring"""
a = RegNetConfig
a = 'regnet'
a = 'pixel_values'
a = True
def A ( self : Dict , SCREAMING_SNAKE_CASE : Dict):
if isinstance(_SCREAMING_SNAKE_CASE , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(_SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def A ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str=False):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
_A : int = value
A : List[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __lowerCamelCase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]):
super().__init__(_SCREAMING_SNAKE_CASE)
_A : Optional[int] = config
_A : Tuple = RegNetEmbeddings(_SCREAMING_SNAKE_CASE)
_A : Dict = RegNetEncoder(_SCREAMING_SNAKE_CASE)
_A : Optional[int] = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] = None , SCREAMING_SNAKE_CASE : str = None):
_A : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_A : Optional[int] = self.embedder(_SCREAMING_SNAKE_CASE)
_A : Dict = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE)
_A : Any = encoder_outputs[0]
_A : Dict = self.pooler(_SCREAMING_SNAKE_CASE)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __lowerCamelCase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Union[str, Any]):
super().__init__(_SCREAMING_SNAKE_CASE)
_A : str = config.num_labels
_A : List[Any] = RegNetModel(_SCREAMING_SNAKE_CASE)
# classification head
_A : Optional[Any] = nn.Sequential(
            nn.Flatten() ,
            nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() ,
        )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] = None , SCREAMING_SNAKE_CASE : Tuple = None , SCREAMING_SNAKE_CASE : List[Any] = None , SCREAMING_SNAKE_CASE : Tuple = None , ):
_A : Any = return_dict if return_dict is not None else self.config.use_return_dict
_A : str = self.regnet(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE)
_A : Dict = outputs.pooler_output if return_dict else outputs[1]
_A : str = self.classifier(_SCREAMING_SNAKE_CASE)
_A : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A : Any = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A : List[Any] = """single_label_classification"""
else:
_A : Any = """multi_label_classification"""
if self.config.problem_type == "regression":
_A : Optional[int] = MSELoss()
if self.num_labels == 1:
_A : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
_A : Union[str, Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config.problem_type == "single_label_classification":
_A : Any = CrossEntropyLoss()
_A : Optional[int] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_A : Union[str, Any] = BCEWithLogitsLoss()
_A : Optional[Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if not return_dict:
_A : str = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states)
| 360 |
'''simple docstring'''
from __future__ import annotations
class Node:
    """simple docstring"""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
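

# Note: print_reverse recurses once per node, so very long lists can exceed
# Python's default recursion limit (sys.getrecursionlimit(), usually 1000).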


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 227 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (KDPMaDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : Optional[int] , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Tuple = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : str = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Union[str, Any] = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = output.prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Tuple = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = output.prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
if str(lowerCamelCase_ ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 323 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a_ : Dict = 'hf-internal-testing/tiny-random-bert'
a_ : Tuple = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
a_ : Optional[int] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = cached_file(a , a)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(a))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(a , a)))
with open(os.path.join(a , 'refs' , 'main')) as f:
SCREAMING_SNAKE_CASE = f.read()
self.assertEqual(a , os.path.join(a , 'snapshots' , a , a))
self.assertTrue(os.path.isfile(a))
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE = cached_file(a , a)
self.assertEqual(a , a)
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE = cached_file(a , a , revision='9b8c223')
self.assertEqual(a , os.path.join(a , 'snapshots' , a , a))
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
with self.assertRaisesRegex(a , 'is not a valid model identifier'):
SCREAMING_SNAKE_CASE = cached_file('tiny-random-bert' , a)
with self.assertRaisesRegex(a , 'is not a valid git identifier'):
SCREAMING_SNAKE_CASE = cached_file(a , a , revision='aaaa')
with self.assertRaisesRegex(a , 'does not appear to have a file named'):
SCREAMING_SNAKE_CASE = cached_file(a , 'conf')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
with self.assertRaisesRegex(a , 'does not appear to have a file named'):
SCREAMING_SNAKE_CASE = cached_file(a , 'conf')
with open(os.path.join(a , 'refs' , 'main')) as f:
SCREAMING_SNAKE_CASE = f.read()
self.assertTrue(os.path.isfile(os.path.join(a , '.no_exist' , a , 'conf')))
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , _raise_exceptions_for_missing_entries=a)
self.assertIsNone(a)
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , local_files_only=a , _raise_exceptions_for_missing_entries=a)
self.assertIsNone(a)
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 500
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a) as mock_head:
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , _raise_exceptions_for_connection_errors=a)
self.assertIsNone(a)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(a , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , a)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(a , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , a , revision='ahaha')
SCREAMING_SNAKE_CASE = get_file_from_repo('bert-base-cased' , a)
# The name is the cached name which is not very easy to test, so instead we load the content.
SCREAMING_SNAKE_CASE = json.loads(open(a , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = Path(a) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(a , 'a.txt') , str(a))
self.assertIsNone(get_file_from_repo(a , 'b.txt'))
| 137 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
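

# Worked example of the rolling hash above (alphabet_size=256; the modulus only
# keeps the numbers small and is ignored here):
#   hash("ab") = ord("a") * 256 + ord("b") = 97 * 256 + 98 = 24930
#   hash("bc") = ord("b") * 256 + ord("c") = 98 * 256 + 99 = 25187
# Rolling from "ab" to "bc" with modulus_power = 256 ** (p_len - 1) = 256:
#   (24930 - ord("a") * 256) * 256 + ord("c") = (24930 - 24832) * 256 + 99 = 25187
# so each one-character shift of the window updates the hash in O(1).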


def test_rabin_karp() -> None:
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 352 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : str = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Any = """lxmert"""
__UpperCamelCase : Optional[Any] = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs)
| 223 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Optional[int] = logging.get_logger(__name__)
a__ : List[Any] = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if "emb" in name:
__SCREAMING_SNAKE_CASE = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
__SCREAMING_SNAKE_CASE = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
__SCREAMING_SNAKE_CASE = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
__SCREAMING_SNAKE_CASE = name.replace("linear1" , "fc1" )
if "linear2" in name:
__SCREAMING_SNAKE_CASE = name.replace("linear2" , "fc2" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
__SCREAMING_SNAKE_CASE = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
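

# Example of the mapping above, applied to two hypothetical fairseq keys:
#   "emb.weight"                          -> "model.decoder.embed_tokens.weight"
#   "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"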
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(state_dict.keys() )
__SCREAMING_SNAKE_CASE = {}
for key in keys:
__SCREAMING_SNAKE_CASE = state_dict.pop(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = rename_keys(lowerCAmelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
__SCREAMING_SNAKE_CASE = val[:hidden_size, :]
__SCREAMING_SNAKE_CASE = val[hidden_size : 2 * hidden_size, :]
__SCREAMING_SNAKE_CASE = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__SCREAMING_SNAKE_CASE = val
else:
__SCREAMING_SNAKE_CASE = val
return state_dict, enc_dec_proj_state_dict
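

# Note on the in_proj_weight split above: the original checkpoint stores the
# attention query/key/value projections as one fused matrix of shape
# (3 * hidden_size, hidden_size); the three slices recover the q, k and v
# projection weights, of shape (hidden_size, hidden_size) each.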
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
elif checkpoint == "medium":
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 48
__SCREAMING_SNAKE_CASE = 24
elif checkpoint == "large":
__SCREAMING_SNAKE_CASE = 2048
__SCREAMING_SNAKE_CASE = 48
__SCREAMING_SNAKE_CASE = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__SCREAMING_SNAKE_CASE = MusicgenDecoderConfig(
hidden_size=lowerCAmelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCAmelCase_ , num_attention_heads=lowerCAmelCase_ , )
return config
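

# For example, decoder_config_from_checkpoint("medium") returns a config with
# hidden_size=1536, ffn_dim=4 * 1536 = 6144, num_hidden_layers=48 and
# num_attention_heads=24, per the branch above.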
@torch.no_grad()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="cpu" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MusicGen.get_pretrained(lowerCAmelCase_ , device=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = decoder_config_from_checkpoint(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = fairseq_model.lm.state_dict()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rename_state_dict(
lowerCAmelCase_ , hidden_size=decoder_config.hidden_size )
__SCREAMING_SNAKE_CASE = TaEncoderModel.from_pretrained("t5-base" )
__SCREAMING_SNAKE_CASE = EncodecModel.from_pretrained("facebook/encodec_32khz" )
__SCREAMING_SNAKE_CASE = MusicgenForCausalLM(lowerCAmelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = decoder.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(lowerCAmelCase_ ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__SCREAMING_SNAKE_CASE = MusicgenForConditionalGeneration(text_encoder=lowerCAmelCase_ , audio_encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCAmelCase_ )
# check we can do a forward pass
__SCREAMING_SNAKE_CASE = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__SCREAMING_SNAKE_CASE = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("t5-base" )
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
__SCREAMING_SNAKE_CASE = MusicgenProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
# set the appropriate bos/pad token ids
__SCREAMING_SNAKE_CASE = 2048
__SCREAMING_SNAKE_CASE = 2048
# set other default generation config params
__SCREAMING_SNAKE_CASE = int(30 * audio_encoder.config.frame_rate )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 3.0
if pytorch_dump_folder is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(lowerCAmelCase_ )
processor.push_to_hub(lowerCAmelCase_ )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
a__ : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 54 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
a__ : Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a__ : List[str] = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a
different length from the reference word sequence (supposedly the correct one). The WER is derived from the
Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for
comparing different systems as well as for evaluating improvements within one system. This kind of measurement,
however, provides no details on the nature of translation errors, and further work is therefore required to
identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence
using dynamic string alignment. The so-called power law, which relates perplexity to word error rate, gives one
way of examining this relationship.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
a__ : Dict = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
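
# Worked check of the docstring example above (hand-computed word alignments):
#   "this is the prediction"   vs "this is the reference":  S=1, D=0, I=0, N=4
#   "there is an other sample" vs "there is another one":   S=2 (another->an,
#   one->other), I=1 (sample), D=0, N=4
#   WER = (S + D + I) / N = (3 + 0 + 1) / 8 = 0.5, matching the example output.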
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCamelCase_ ( datasets.Metric):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=False ) -> Optional[int]:
if concatenate_texts:
return compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )["wer"]
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 54 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_A = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = generator.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''', torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=UpperCamelCase__, generator=UpperCamelCase__, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 167 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_uncond_unet
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 167 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Dict = KandinskyVaaImgaImgPipeline
__magic_name__ :str = ["""image_embeds""", """negative_image_embeds""", """image"""]
__magic_name__ :Union[str, Any] = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
__magic_name__ :Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__magic_name__ :Any = False
@property
def snake_case ( self ):
'''simple docstring'''
return 3_2
@property
def snake_case ( self ):
'''simple docstring'''
return 3_2
@property
def snake_case ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case ( self ):
'''simple docstring'''
return 1_0_0
@property
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ :str = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def snake_case ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.dummy_unet
lowerCAmelCase__ :Optional[Any] = self.dummy_movq
lowerCAmelCase__ :int = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase__ :Tuple = DDIMScheduler(**__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
lowerCAmelCase__ :Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ :int = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
if str(__UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase__ :Any = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = 'cpu'
lowerCAmelCase__ :Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ :str = self.pipeline_class(**__UpperCAmelCase )
lowerCAmelCase__ :Dict = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :int = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
lowerCAmelCase__ :Optional[int] = output.images
lowerCAmelCase__ :Tuple = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
lowerCAmelCase__ :Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase__ :Optional[Any] = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
lowerCAmelCase__ :int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase__ :List[str] = 'A red cartoon frog, 4k'
lowerCAmelCase__ :Dict = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
lowerCAmelCase__ :Optional[Any] = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ :Tuple = pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ :Dict = pipeline(
image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
lowerCAmelCase__ :int = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 293 |
from __future__ import annotations
import math
lowerCamelCase__ = """2020.9.26"""
lowerCamelCase__ = """xcodz-dot, cclaus, dhruvmanila"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> tuple[float, float]:
if not all(isinstance(SCREAMING_SNAKE_CASE_ , (float, int) ) for val in locals().values() ):
lowerCAmelCase__ : List[str] = F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = ((x * distance) / (z + distance)) * scale
lowerCAmelCase__ : Optional[int] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
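

# Sanity check matching the doctest-style print at the bottom of this file:
#   convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
#     projected_x = ((1.0 * 10.0) / (3.0 + 10.0)) * 10.0 = 100 / 13 ≈ 7.6923
#     projected_y = ((2.0 * 10.0) / (3.0 + 10.0)) * 10.0 = 200 / 13 ≈ 15.3846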


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
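    # Note: this is not the standard degrees-to-radians conversion
    # (math.radians(angle) == angle * math.pi / 180). The mapping
    # (angle % 360) / 450 * 180 / pi means rotate(..., 90.0) feeds an effective
    # angle of about 11.46 into cos/sin below -- apparently a deliberate quirk
    # of the original implementation.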
if axis == "z":
lowerCAmelCase__ : Tuple = x * math.cos(SCREAMING_SNAKE_CASE_ ) - y * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = y * math.cos(SCREAMING_SNAKE_CASE_ ) + x * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = z
elif axis == "x":
lowerCAmelCase__ : Dict = y * math.cos(SCREAMING_SNAKE_CASE_ ) - z * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = z * math.cos(SCREAMING_SNAKE_CASE_ ) + y * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = x
elif axis == "y":
lowerCAmelCase__ : str = x * math.cos(SCREAMING_SNAKE_CASE_ ) - z * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = z * math.cos(SCREAMING_SNAKE_CASE_ ) + x * math.sin(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 212 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
A__ : Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler("""sample_euler""" )
A__ : Dict = """A painting of a squirrel eating a burger"""
A__ : Optional[int] = torch.manual_seed(0 )
A__ : str = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A__ : Any = output.images
A__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A__ : List[str] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
A__ : Union[str, Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A__ : Any = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler("""sample_euler""" )
A__ : List[str] = """A painting of a squirrel eating a burger"""
A__ : int = torch.manual_seed(0 )
A__ : Any = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A__ : Dict = output.images
A__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A__ : List[str] = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __A ( self ):
A__ : Any = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A__ : List[Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
A__ : int = """A painting of a squirrel eating a burger"""
A__ : List[Any] = torch.manual_seed(0 )
A__ : Tuple = sd_pipe(
[prompt] , generator=A__ , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=A__ , )
A__ : List[Any] = output.images
A__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A__ : str = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 366 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Optional[int] = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def UpperCamelCase (lowercase_: List[Any] , lowercase_: str ) -> List[str]:
# Initialize accelerator
A__ : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : List[Any] = config["""lr"""]
A__ : Union[str, Any] = int(config["""num_epochs"""] )
A__ : List[Any] = int(config["""seed"""] )
A__ : Optional[Any] = int(config["""batch_size"""] )
A__ : Tuple = args.model_name_or_path
set_seed(lowercase_ )
A__ , A__ : Optional[Any] = get_dataloaders(lowercase_ , lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Tuple = AutoModelForSequenceClassification.from_pretrained(lowercase_ , return_dict=lowercase_ )
# Instantiate optimizer
A__ : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A__ : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowercase_ )
if accelerator.state.deepspeed_plugin is not None:
A__ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A__ : Optional[int] = 1
A__ : Optional[int] = (len(lowercase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=0 , num_training_steps=lowercase_ , )
else:
A__ : int = DummyScheduler(lowercase_ , total_num_steps=lowercase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : str = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
A__ : Dict = 0
# We also need to keep track of the stating epoch so files are named properly
A__ : Any = 0
A__ : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
A__ : Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
A__ : Tuple = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A__ : Dict = args.resume_from_checkpoint.split("""epoch_""" )[1]
A__ : int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A__ : Any = int(lowercase_ ) + 1
A__ : Any = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.print("""resumed checkpoint performance:""" , lowercase_ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
A__ : int = json.load(lowercase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A__ : Any = f"""epoch_{epoch}"""
A__ : int = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
A__ : List[Any] = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
A__ : Tuple = accuracy
A__ : Optional[Any] = lr_scheduler.get_lr()[0]
A__ : Tuple = optimizer.param_groups[0]["""lr"""]
A__ : int = epoch
A__ : int = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
                json.dump(state, f)
def main() -> None:
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
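# Hedged sketch (an illustrative addition, not part of the original script): the
# checkpoint round trip exercised above boils down to `save_state`/`load_state`,
# which persist and restore model, optimizer, and scheduler state together.
def _checkpoint_round_trip(accelerator, output_dir: str) -> None:
    accelerator.save_state(output_dir)  # writes model/optimizer/scheduler state to disk
    accelerator.load_state(output_dir)  # restores the same objects in place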
| 141 | 0 |
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count the reversible numbers of the given length, filling digits pairwise."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit_a in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit_a
        if (remainder + digit_a) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit_b in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit_b
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit_a + digit_b) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count the reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 324 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
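# Minimal illustration (assumed names, not part of the original module) of the
# `_LazyModule` idea used above: attribute access triggers the real import the
# first time the symbol is requested.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)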
| 324 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeq2SeqLM,
    '''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check the named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators were reworked.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # Print the names of parameters that did not receive a gradient.
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
) -> pl.Trainer:
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
| 353 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
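# Quick sanity sketch (illustrative addition, not part of the original module):
# default construction reproduces the LeViT-128S layout set up above.
def _demo_default_levit_config() -> None:
    config = LevitConfig()
    assert config.hidden_sizes == [128, 256, 384]
    assert config.depths == [4, 4, 4]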
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
return 1e-4 | 34 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Any]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
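# Hedged usage sketch (illustrative; the 440 Hz tone and 16 kHz rate are made
# up, and `soundfile` must be installed): encoding an in-memory waveform goes
# through the "array" branch above and yields WAV bytes with no path.
def _demo_encode_audio() -> None:
    waveform = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16000)).astype(np.float32)
    encoded = Audio().encode_example({"array": waveform, "sampling_rate": 16000})
    assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)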
| 344 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
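# Minimal numpy-only sketch of the kernel layout conversion above (shapes are
# assumptions for illustration): Flax conv kernels are (H, W, C_in, C_out) while
# PyTorch expects (C_out, C_in, H, W); dense kernels are simply transposed.
def _demo_kernel_layouts() -> None:
    flax_conv = np.zeros((3, 3, 8, 16))  # (H, W, C_in, C_out)
    pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))  # -> (C_out, C_in, H, W)
    assert pt_conv.shape == (16, 8, 3, 3)

    flax_dense = np.zeros((128, 64))  # (in_features, out_features)
    assert flax_dense.T.shape == (64, 128)  # -> (out_features, in_features)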
| 344 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 350 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
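# Usage sketch (illustrative addition): hide the cursor for the duration of a
# long-running render; the finally block restores it even on exceptions.
def _demo_hidden_cursor(seconds: float = 1.0) -> None:
    import time

    with hide():
        time.sleep(seconds)  # cursor hidden here; restored afterwards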
| 266 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circle's arc spanning `angle` degrees."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
| 328 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt (subject to float precision)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search over integers."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
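    # Exact integer alternative (illustrative addition): math.isqrt avoids the
    # float round-off that can make the sqrt-based check unreliable for huge inputs.
    def perfect_square_isqrt(num: int) -> bool:
        return num >= 0 and math.isqrt(num) ** 2 == num

    assert perfect_square_isqrt(16) and not perfect_square_isqrt(15)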
| 328 | 1 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 | import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
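    # A few illustrative checks (formats assumed from the regex above):
    print(indian_phone_validator('9876543210'))      # True: bare 10-digit mobile
    print(indian_phone_validator('+91-9876543210'))  # True: with country code
    print(indian_phone_validator('12345'))           # False: too short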
| 258 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 226 | 0 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
return (vocab_file,) | 358 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate similarity by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Select the second parent and generate new children proportionally to fitness."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
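# Quick convergence sketch (illustrative, not part of the original module): with
# a tiny alphabet and target, `basic` should finish in a handful of generations.
def _demo_small_target() -> None:
    generations, total, best = basic("hello", list("helo"), debug=False)
    assert best == "hello"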
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    ) | 151 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='facebook/xglm-564M', padding=False, )
| 71 |
import cmath
import math
def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Calculate the apparent power in a single-phase AC circuit."""
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
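    # Worked example (illustrative): 120 V at 0 degrees and 5 A at -30 degrees
    # give |S| = 600 VA at a -30 degree (-pi/6 rad) angle.
    s = apparent_power(120, 5, 0, -30)
    print(abs(s), cmath.phase(s))  # ~600.0, ~-0.5236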
| 227 | 0 |
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def __magic_name__ ( A , A=None , A=None , A=None ) -> str:
snake_case = True
while ask_again:
snake_case = input(_snake_case )
try:
if default is not None and len(_snake_case ) == 0:
return default
return convert_value(_snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_snake_case )
def __magic_name__ ( A , A=[] , A=None , A=0 ) -> str:
snake_case = BulletMenu(_snake_case , _snake_case )
snake_case = menu.run(default_choice=_snake_case )
return convert_value(_snake_case ) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that strips the usage line from subcommand help messages."""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
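if __name__ == "__main__":
    # Minimal interactive sketch (not part of the original file): _ask_field
    # re-prompts until convert_value succeeds, so non-numeric input is retried.
    num_processes = _ask_field("How many processes? [1]: ", convert_value=int, default=1)
    print(f"Launching with {num_processes} process(es)")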
| 352 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
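# Quick illustration (assumed input, not from the source): for the symbol tuple
# ("h", "e", "l", "l", "o"), get_pairs yields the adjacent pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} that BPE ranks against its merges.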
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 BPE tokenizer (decoder-only unless a merges file is given)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
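    # Illustrative example (assumed tokens, not from the source):
    # " ".join(["hall@@", "o"]) -> "hall@@ o"; splitting on "@@ " and re-joining
    # concatenates the pieces, yielding "hallo".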
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 40 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
"""simple docstring"""
lowercase_ :str = prediction_length
lowercase_ :Dict = context_length if context_length is not None else prediction_length
lowercase_ :Any = distribution_output
lowercase_ :Tuple = loss
lowercase_ :Dict = input_size
lowercase_ :Tuple = num_time_features
lowercase_ :int = lags_sequence
lowercase_ :Tuple = scaling
lowercase_ :List[Any] = num_dynamic_real_features
lowercase_ :Union[str, Any] = num_static_real_features
lowercase_ :str = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase_ :Optional[int] = cardinality
else:
lowercase_ :Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase_ :Tuple = embedding_dimension
else:
lowercase_ :Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase_ :Any = num_parallel_samples
# Transformer architecture configuration
lowercase_ :Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase_ :Union[str, Any] = d_model
lowercase_ :Optional[Any] = encoder_attention_heads
lowercase_ :Optional[Any] = decoder_attention_heads
lowercase_ :Optional[int] = encoder_ffn_dim
lowercase_ :int = decoder_ffn_dim
lowercase_ :Any = encoder_layers
lowercase_ :Optional[Any] = decoder_layers
lowercase_ :Dict = dropout
lowercase_ :Dict = attention_dropout
lowercase_ :str = activation_dropout
lowercase_ :int = encoder_layerdrop
lowercase_ :Dict = decoder_layerdrop
lowercase_ :List[str] = activation_function
lowercase_ :int = init_std
lowercase_ :Optional[int] = use_cache
# Autoformer
lowercase_ :List[str] = label_length
lowercase_ :List[str] = moving_average
lowercase_ :Optional[int] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase , **lowercase )
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
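# A quick sanity check of the sizes above (illustrative, not part of the original
# file): with the defaults -- input_size=1, seven lags, and no static, dynamic,
# or time features -- `_number_of_features` is 2 (the loc/scale features), so
# `feature_size` comes out to 1 * 7 + 2 = 9.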
| 223 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
    graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
        sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
##These nodes will assign the centroid Variables the appropriate
##values
__lowerCAmelCase : List[Any] = tf.placeholder("""float64""" , [dim] )
__lowerCAmelCase : List[str] = []
for centroid in centroids:
cent_assigns.append(tf.assign(__A , __A ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
##These nodes will assign an assignment Variable the appropriate
##value
__lowerCAmelCase : str = tf.placeholder("""int32""" )
__lowerCAmelCase : List[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__A , __A ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__lowerCAmelCase : Any = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
# Placeholders for input
__lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" , [dim] )
__lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" , [dim] )
__lowerCAmelCase : Union[str, Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__A , __A ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__lowerCAmelCase : List[str] = tf.placeholder("""float""" , [noofclusters] )
__lowerCAmelCase : List[str] = tf.argmin(__A , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
# Now use the cluster assignment node, with the distances
# as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
# Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
# Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
return centroids, assignments
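if __name__ == "__main__":
    # Tiny illustrative run (made-up points, not from the source). Note that the
    # graph ops above (tf.Session, tf.sub, tf.initialize_all_variables) require
    # a TensorFlow 1.x runtime.
    sample_points = array([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.2, 7.9]])
    found_centroids, found_assignments = TFKMeansCluster(sample_points, 2)
    print(found_centroids)
    print(found_assignments)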
| 139 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
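# Worked example (illustrative, not in the original file): merging [1.0, 3.0]
# and [2.0] gives [1.0, 2.0, 3.0], an odd count, so the median is 2.0; merging
# [1.0, 2.0] and [3.0, 4.0] gives an even count and the median (2.0 + 3.0) / 2 = 2.5.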
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_one = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_two = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_one, array_two)}")
| 139 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 167 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering and other tasks
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
def a_ ( self : Optional[int] , _lowerCamelCase : "ProcessorMixin" , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 40 , _lowerCamelCase : int = 40 , ):
"""simple docstring"""
setattr(processor.image_processor , '''apply_ocr''' , _lowerCamelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A_ : Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : Tuple = processor.tokenizer.num_special_tokens_to_add(_lowerCamelCase )
A_ : List[Any] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
A_ : int = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
A_ : Optional[int] = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
A_ : str = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = dict(
processor(
_lowerCamelCase , text=_lowerCamelCase , boxes=_lowerCamelCase , return_tensors=_lowerCamelCase , ) )
return inputs
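# Minimal export-prep sketch (hypothetical names, not from this file):
#
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#
# `dummy` then holds input_ids, attention_mask, bbox, and pixel_values shaped
# to the fixed batch/sequence dimensions chosen above.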
| 167 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
# Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
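# Example invocation (illustrative, assuming the accelerate CLI wires this up
# as `accelerate tpu-config`):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --debug
#
# With --debug the assembled gcloud command is printed instead of executed.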
| 105 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 105 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 249 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) naively."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule, using only n multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
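    # Both calls print 79800.0: p(x) = 5x^2 + 9.3x^3 + 7x^4 at x = 10, which
    # Horner folds as ((((7)x + 9.3)x + 5)x + 0)x + 0 (illustrative check,
    # not part of the original file).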
| 141 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 174 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models: CNN/DM stories and summaries."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a story file, using the @highlight markers."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the source and target sequences' lengths to the block size."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines, flattening sentences into single token sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Alternate segment ids (0/1) for each sentence, switching at separator tokens."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
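# Quick illustration of build_mask (assumed values, not from the source): with
# pad_token_id = 0, build_mask(torch.tensor([5, 7, 0, 0]), 0) returns
# tensor([1, 1, 0, 0]), masking out the padded positions.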
| 174 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 26 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture."""
    bertabs_config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(bertabs_config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(bertabs_config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 34 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: the maximum sum over all (optionally empty) subarrays."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
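    # Kadane's scan keeps a running best-ending-here sum; for `nums` it finds
    # the subarray [4, -1, 2, 1] with sum 6 (illustrative note, not in the
    # original file).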
| 361 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 180 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
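
# Hedged usage sketch of the happy path the tests above exercise (assumes the
# `transformers` package; the random waveform merely stands in for real audio):
#
#     extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 16 kHz, 30 s chunks
#     waveform = np.random.randn(16000).astype(np.float32)  # 1 second of "audio"
#     features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_features
#     print(features.shape)  # (1, 80, 3000): padded to nb_max_frames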
| 318 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
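
# Hedged usage sketch of the processor under test (assumes `transformers` and
# `Pillow`; the blank 32x32 image is purely illustrative):
#
#     image_processor = ViTImageProcessor(size={"height": 18, "width": 18})
#     image = Image.new("RGB", (32, 32))
#     pixel_values = image_processor(image, return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])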
| 266 | 0 |
"""Convert a LongformerForQuestionAnswering PyTorch Lightning checkpoint into a Transformers checkpoint."""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
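
# Hedged follow-up: once converted, the dump folder behaves like any other
# Transformers checkpoint (the path is the one passed via --pytorch_dump_folder_path):
#
#     model = LongformerForQuestionAnswering.from_pretrained(args.pytorch_dump_folder_path)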
| 367 |
"""Print a diamond of stars: an upper pyramid followed by its mirror image."""


# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic: "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
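
# For reference, a worked trace of pretty_print(3): floyd(3) prints the upper
# pyramid and reverse_floyd(3) mirrors it, yielding the diamond
#
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *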
| 107 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the surrounding git repository to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
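
# Hedged launch sketch: the environment variables read by init_gpu_params are
# normally exported by the job launcher. Note that N_GPU_NODE, N_NODES, and
# NODE_RANK are custom to this script (not standard torch launcher variables);
# a hypothetical single-node, 2-GPU invocation might look like:
#
#     WORLD_SIZE=2 N_GPU_NODE=2 RANK=0 N_NODES=1 NODE_RANK=0 \
#         python train.py --n_gpu 2 --local_rank 0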
| 94 |
"""Rectified linear unit (ReLU) activation."""
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply ReLU element-wise, i.e. max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
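
    # The implementation is fully vectorised, so multi-dimensional inputs work unchanged:
    print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0.  3. ] [0.5 0. ]]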
| 258 | 0 |
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
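
# Hedged usage sketch: a table like this is typically consumed when assembling
# install_requires/extras lists, looking up the full specifier by package name:
required = [deps[pkg] for pkg in ("numpy", "tokenizers", "tqdm")]
print(required)  # ['numpy>=1.17', 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'tqdm>=4.27']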
| 157 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
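
    # Hedged note: each test above is the moral equivalent of invoking the
    # example script from the command line with the same flags, e.g.
    #
    #     python run_flax_glue.py --model_name_or_path distilbert-base-uncased \
    #         --output_dir /tmp/out --train_file train.csv --validation_file dev.csv ...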
| 157 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
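
# Hedged usage sketch of the composite config defined above:
text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
vision_config = AlignVisionConfig(image_size=600)
align_config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
print(align_config.to_dict()["model_type"])  # 'align'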
| 71 |
"""Word error rate (WER) metric, implemented with jiwer."""
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
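
# Hedged worked example of the iterative branch above: comparing the prediction
# "this is the prediction" against the reference "this is the reference" gives
# S=1, D=0, I=0, C=3, so WER = (S + D + I) / (S + D + C) = 1 / 4 = 0.25.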
| 151 | 0 |
"""Mean absolute deviation of a list of numbers."""


def mean_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute distance of each element from the mean."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
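
    # Worked example: for [1, 2, 3, 4] the average is 2.5, the absolute
    # deviations are 1.5, 0.5, 0.5 and 1.5, and their mean is 1.0:
    print(mean_absolute_deviation([1, 2, 3, 4]))  # 1.0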
| 136 |
"""Expected number of distinct colours when drawing balls from an urn of 70 balls, 10 of each of 7 colours."""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
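
    # Hedged sanity note: by linearity of expectation the formula above is
    # NUM_COLOURS * P(a given colour appears), with
    # P = 1 - C(60, 20) / C(70, 20), which evaluates to 6.818741802.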
| 136 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 24 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Union[str, Any] ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = max(snake_case_ )
__UpperCAmelCase = min(snake_case_ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , snake_case_ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , snake_case_ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowercase__ ( snake_case_ :str ):
return "".join([chr(snake_case_ ) for i in counting_sort([ord(snake_case_ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
_lowercase : int = input('Enter numbers separated by a comma:\n').strip()
_lowercase : int = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
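
    # Worked example: counting sort is stable and runs in O(n + k), where k is
    # the value range, so duplicates keep their relative order:
    print(counting_sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]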
| 332 | 0 |
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE="None" ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Optional[int]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : List[Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_input_mask
UpperCAmelCase_ : Optional[Any] = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : str = num_choices
UpperCAmelCase_ : Union[str, Any] = relative_attention
UpperCAmelCase_ : List[Any] = position_biased_input
UpperCAmelCase_ : Union[str, Any] = pos_att_type
UpperCAmelCase_ : Dict = scope
def a__ ( self ) -> int:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
UpperCAmelCase_ : Any = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : int = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> Optional[Any]:
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = DebertaVaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : str = DebertaVaForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : int = DebertaVaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = DebertaVaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : Optional[Any] = DebertaVaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Dict = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,start_positions=_SCREAMING_SNAKE_CASE ,end_positions=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : int = DebertaVaForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase_ : List[Any] = model(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
) : str = config_and_inputs
UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
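# Sketch of how the common test mixins consume the dict built above (the
# exact call sites live in ModelTesterMixin; this is only an illustration):
# the dict is splatted straight into the model under test, e.g.
#   model = DebertaVaModel(config).to(torch_device).eval()
#   outputs = model(**inputs_dict)
# so every key must match a keyword argument of the model's forward().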
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = DebertaVaModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> int:
self.config_tester.run_common_tests()
def a__ ( self ) -> Any:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> List[Any]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = DebertaVaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def a__ ( self ) -> List[Any]:
pass
@slow
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
UpperCAmelCase_ : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
UpperCAmelCase_ : Tuple = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ,f'''{output[:, 1:4, 1:4]}''' ) | 235 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('''num_inference_steps''', 50),)
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Tuple = {'''num_train_timesteps''': 1_000}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : int = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : Optional[Any] = 0.1 * sample
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCAmelCase_ : Optional[int] = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
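# The block above follows the standard diffusers persistence round-trip
# (sketch using the names from this test; the instance is whichever
# scheduler_class is being exercised):
#   with tempfile.TemporaryDirectory() as tmp:
#       scheduler.save_config(tmp)                       # writes scheduler_config.json
#       reloaded = scheduler_class.from_pretrained(tmp)  # rebuilds from that json
# and then requires step() outputs to agree to within 1e-5.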
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ,_SCREAMING_SNAKE_CASE=0 ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Tuple = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.dummy_sample
UpperCAmelCase_ : Tuple = 0.1 * sample
UpperCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Tuple = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase_ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : str = dummy_past_residuals[:]
UpperCAmelCase_ : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Optional[int] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : str = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).prev_sample
return sample
def a__ ( self ) -> str:
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Any = kwargs.pop('''num_inference_steps''' ,_SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE ,'''set_timesteps''' ):
UpperCAmelCase_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[Any] = scheduler.timesteps[5]
UpperCAmelCase_ : Dict = scheduler.timesteps[6]
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self ) -> Any:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE ,time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.full_loop()
UpperCAmelCase_ : str = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 2_540_529 ) < 10 | 235 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str]=13 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : List[str]=99 ,SCREAMING_SNAKE_CASE__ : Tuple=0 ,SCREAMING_SNAKE_CASE__ : List[str]=32 ,SCREAMING_SNAKE_CASE__ : List[Any]=5 ,SCREAMING_SNAKE_CASE__ : List[str]=4 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=512 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,SCREAMING_SNAKE_CASE__ : Tuple=4 ,SCREAMING_SNAKE_CASE__ : Dict="last" ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Dict=0 ,):
SCREAMING_SNAKE_CASE:Optional[Any] = parent
SCREAMING_SNAKE_CASE:Tuple = batch_size
SCREAMING_SNAKE_CASE:List[str] = seq_length
SCREAMING_SNAKE_CASE:Optional[int] = is_training
SCREAMING_SNAKE_CASE:int = use_input_lengths
SCREAMING_SNAKE_CASE:Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE:List[str] = use_labels
SCREAMING_SNAKE_CASE:int = gelu_activation
SCREAMING_SNAKE_CASE:Dict = sinusoidal_embeddings
SCREAMING_SNAKE_CASE:List[str] = causal
SCREAMING_SNAKE_CASE:Optional[int] = asm
SCREAMING_SNAKE_CASE:Tuple = n_langs
SCREAMING_SNAKE_CASE:Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE:List[Any] = n_special
SCREAMING_SNAKE_CASE:int = hidden_size
SCREAMING_SNAKE_CASE:List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE:str = num_attention_heads
SCREAMING_SNAKE_CASE:List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE:Any = type_sequence_label_size
SCREAMING_SNAKE_CASE:Optional[int] = initializer_range
SCREAMING_SNAKE_CASE:Dict = num_labels
SCREAMING_SNAKE_CASE:List[str] = num_choices
SCREAMING_SNAKE_CASE:int = summary_type
SCREAMING_SNAKE_CASE:Union[str, Any] = use_proj
SCREAMING_SNAKE_CASE:Optional[Any] = scope
SCREAMING_SNAKE_CASE:Optional[int] = bos_token_id
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE:Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE:Any = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE:str = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE:Dict = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE:Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
SCREAMING_SNAKE_CASE:Union[str, Any] = None
SCREAMING_SNAKE_CASE:int = None
SCREAMING_SNAKE_CASE:Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor([self.batch_size] ,2 ).float()
SCREAMING_SNAKE_CASE:Any = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE:Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self : List[str] ):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : int ,):
SCREAMING_SNAKE_CASE:List[str] = XLMModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ ,lengths=SCREAMING_SNAKE_CASE__ ,langs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ ,langs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,):
SCREAMING_SNAKE_CASE:Dict = XLMWithLMHeadModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
SCREAMING_SNAKE_CASE:Dict = XLMForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,):
SCREAMING_SNAKE_CASE:Any = XLMForQuestionAnswering(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:List[str] = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = model(
SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,cls_index=SCREAMING_SNAKE_CASE__ ,is_impossible=SCREAMING_SNAKE_CASE__ ,p_mask=SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:Tuple = model(
SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,cls_index=SCREAMING_SNAKE_CASE__ ,is_impossible=SCREAMING_SNAKE_CASE__ ,)
((SCREAMING_SNAKE_CASE) , ):Dict = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ )
((SCREAMING_SNAKE_CASE) , ):Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
SCREAMING_SNAKE_CASE:Tuple = XLMForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
SCREAMING_SNAKE_CASE:List[str] = self.num_labels
SCREAMING_SNAKE_CASE:List[str] = XLMForTokenClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Dict = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE:Union[str, Any] = XLMForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE:List[str] = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Optional[Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
):Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE:Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class _snake_case ( _a , _a , _a , unittest.TestCase ):
_A : Optional[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A : Dict = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]=False ):
SCREAMING_SNAKE_CASE:Dict = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
SCREAMING_SNAKE_CASE:str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Union[str, Any] = XLMModelTester(self )
SCREAMING_SNAKE_CASE:Dict = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,emb_dim=37 )
def __UpperCamelCase ( self : Dict ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : Any=1 ):
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for iter_attentions in attentions] ,[True] * len(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(SCREAMING_SNAKE_CASE__ ):
# adds PAD dummy token
SCREAMING_SNAKE_CASE:Optional[int] = min_length + idx + 1
SCREAMING_SNAKE_CASE:List[Any] = min_length + idx + 1
SCREAMING_SNAKE_CASE:str = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(SCREAMING_SNAKE_CASE__ ) )
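# Shape being asserted above, spelled out (one tuple per generation step):
#   (batch_size * num_beam_groups, num_attention_heads, tgt_len, src_len)
# where tgt_len == src_len == min_length + step, since this check assumes
# the model re-attends over the full sequence at every generation step.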
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=False ,SCREAMING_SNAKE_CASE__ : List[str]=1 ):
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for iter_hidden_states in hidden_states] ,[True] * len(SCREAMING_SNAKE_CASE__ ) ,)
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(SCREAMING_SNAKE_CASE__ ):
# adds PAD dummy token
SCREAMING_SNAKE_CASE:List[Any] = min_length + idx + 1
SCREAMING_SNAKE_CASE:List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(SCREAMING_SNAKE_CASE__ ) ,)
pass
@slow
def __UpperCamelCase ( self : Tuple ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:str = XLMModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[Any] = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = torch.tensor([[14, 447]] ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ ) # the president
SCREAMING_SNAKE_CASE:int = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
SCREAMING_SNAKE_CASE:Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,SCREAMING_SNAKE_CASE__ )
| 139 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def A_ ( snake_case , snake_case , snake_case , snake_case = 100 , ):
SCREAMING_SNAKE_CASE:Any = x_start
SCREAMING_SNAKE_CASE:int = fnc(snake_case )
SCREAMING_SNAKE_CASE:int = 0.0
for _ in range(snake_case ):
# Approximates curve as a sequence of linear lines and sums their length
SCREAMING_SNAKE_CASE:Optional[int] = (x_end - x_start) / steps + xa
SCREAMING_SNAKE_CASE:int = fnc(snake_case )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
SCREAMING_SNAKE_CASE:Tuple = xa
SCREAMING_SNAKE_CASE:List[Any] = fxa
return length
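# Quick sanity check for the approximation above (a minimal sketch, assuming
# the intended signature line_length(fnc, x_start, x_end, steps)): the graph
# of f(x) = x from 0 to 1 is a straight segment, so the piecewise-linear sum
# should recover its exact length sqrt(2):
#   line_length(lambda x: x, 0, 1, 10)  # ~1.4142135623730951 == math.hypot(1, 1)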
if __name__ == "__main__":
def A_ ( snake_case ):
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
A_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 139 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
# initialize config
if "resnet-50" in model_name:
__UpperCAmelCase : int = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
__UpperCAmelCase : List[Any] = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
__UpperCAmelCase : Optional[Any] = DetrConfig(use_timm_backbone=snake_case__, backbone_config=snake_case__ )
# set label attributes
__UpperCAmelCase : Optional[Any] = "panoptic" in model_name
if is_panoptic:
__UpperCAmelCase : Optional[int] = 250
else:
__UpperCAmelCase : Dict = 91
__UpperCAmelCase : Optional[Any] = "huggingface/label-files"
__UpperCAmelCase : Tuple = "coco-detection-id2label.json"
__UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : List[str] = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
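# Illustrative result of the config assembly above (values hedged from the
# code itself, not from a checkpoint): for "detr-resnet-50" this yields a
# DetrConfig wrapping a ResNetConfig backbone, with 91 COCO detection labels
# loaded from coco-detection-id2label.json, or 250 labels for the panoptic
# variants.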
def _UpperCamelCase ( snake_case__ ) -> Any:
# here we list all keys to be renamed (original name on the left, our name on the right)
__UpperCAmelCase : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
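# Hedged sketch of how the rename map above is applied during conversion
# (it mirrors the rename loop in convert_detr_checkpoint further below;
# state_dict is the torch-hub checkpoint's state dict):
#   for src, dest in create_rename_keys(config):
#       state_dict[dest] = state_dict.pop(src)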
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : List[Any] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Optional[Any] = val
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = ""
if is_panoptic:
__UpperCAmelCase : Dict = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase : int = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : int = in_proj_weight[:256, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[:256]
__UpperCAmelCase : Optional[Any] = in_proj_weight[256:512, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[256:512]
__UpperCAmelCase : Optional[Any] = in_proj_weight[-256:, :]
__UpperCAmelCase : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase : Tuple = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__UpperCAmelCase : Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Any = in_proj_weight[:256, :]
__UpperCAmelCase : Tuple = in_proj_bias[:256]
__UpperCAmelCase : str = in_proj_weight[256:512, :]
__UpperCAmelCase : Dict = in_proj_bias[256:512]
__UpperCAmelCase : Any = in_proj_weight[-256:, :]
__UpperCAmelCase : str = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__UpperCAmelCase : Union[str, Any] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
__UpperCAmelCase : int = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__UpperCAmelCase : List[Any] = in_proj_weight_cross_attn[:256, :]
__UpperCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[:256]
__UpperCAmelCase : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
__UpperCAmelCase : Any = in_proj_bias_cross_attn[256:512]
__UpperCAmelCase : Union[str, Any] = in_proj_weight_cross_attn[-256:, :]
__UpperCAmelCase : int = in_proj_bias_cross_attn[-256:]
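# Minimal sketch of the fused-projection split performed above (hypothetical
# tensor, not read from a checkpoint): torch.nn.MultiheadAttention stores the
# query/key/value weights as one (3*d, d) matrix; with DETR's hidden size of
# 256 that is a (768, 256) tensor sliced into three 256-row blocks:
#   in_proj = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj[:256, :], in_proj[256:512, :], in_proj[-256:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (256, 256)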
def _UpperCamelCase ( ) -> Union[str, Any]:
__UpperCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : int = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=False ) -> Tuple:
__UpperCAmelCase : Dict = get_detr_config(snake_case__ )
# load original model from torch hub
__UpperCAmelCase : Optional[int] = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f'''Converting model {model_name}...''' )
__UpperCAmelCase : List[Any] = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=snake_case__ ).eval()
__UpperCAmelCase : Tuple = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case__ ):
if is_panoptic:
__UpperCAmelCase : Dict = "detr." + src
rename_key(snake_case__, snake_case__, snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__, is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase : Dict = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
__UpperCAmelCase : Optional[int] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Any = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase : int = state_dict.pop(snake_case__ )
__UpperCAmelCase : Union[str, Any] = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
__UpperCAmelCase : List[Any] = state_dict.pop(snake_case__ )
__UpperCAmelCase : Union[str, Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
__UpperCAmelCase : int = state_dict.pop(snake_case__ )
__UpperCAmelCase : Optional[Any] = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : Optional[Any] = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify our conversion on an image
__UpperCAmelCase : Union[str, Any] = "coco_panoptic" if is_panoptic else "coco_detection"
__UpperCAmelCase : Union[str, Any] = DetrImageProcessor(format=snake_case__ )
__UpperCAmelCase : str = processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Optional[Any] = encoding["pixel_values"]
__UpperCAmelCase : List[Any] = detr(snake_case__ )
__UpperCAmelCase : str = model(snake_case__ )
assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3 )
assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_snake_case = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 356 | import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
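# Illustration of the padding invariant established above (assumes the SHAaaa
# name this file's tests use for the class): the message gains a 0x80 marker
# byte, zero padding, and an 8-byte big-endian bit length, landing exactly on
# a 64-byte block boundary:
#   padded = SHAaaa.preprocessing(b"abc")
#   len(padded) % 64 == 0   # True: 3 bytes -> one 64-byte block
#   padded[3] == 0x80       # the mandatory leading 1 bit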
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , block ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
__UpperCAmelCase : List[Any] = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0000_0000
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , value: int , rotations: int ) -> int:
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
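# Right-rotation sketch for the helper above (invoked as self.ror in the
# compression loop): bits shifted off the low end wrap around to the top of
# the 32-bit word, e.g. ror(0x00000001, 1) == 0x80000000 and
# ror(0x80000000, 31) == 0x00000001.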
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.sha256(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
| 342 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=128 , lowerCAmelCase__=32 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> List[Any]:
a : List[str] = parent
a : str = batch_size
a : int = seq_length
a : int = is_training
a : str = use_input_mask
a : Dict = use_token_type_ids
a : List[str] = use_labels
a : int = vocab_size
a : Any = hidden_size
a : List[Any] = num_hidden_layers
a : Optional[int] = num_attention_heads
a : Union[str, Any] = intermediate_size
a : Optional[Any] = hidden_act
a : Any = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Optional[int] = max_position_embeddings
a : str = type_vocab_size
a : int = type_sequence_label_size
a : Any = initializer_range
a : Optional[int] = num_labels
a : Optional[int] = num_choices
a : str = scope
def __a ( self ) -> Tuple:
a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Dict = None
if self.use_input_mask:
a : str = random_attention_mask([self.batch_size, self.seq_length] )
a : str = None
if self.use_token_type_ids:
a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : int = None
a : Dict = None
a : Tuple = None
if self.use_labels:
a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : str = ids_tensor([self.batch_size] , self.num_choices )
a : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> List[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder ( self ) -> int:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Any:
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping =(
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible =True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ) -> List[str]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self ) -> Tuple:
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
    def test_config ( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    def test_model ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask ( self ) -> str:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_masked_lm ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ) -> List[str]:
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def test_torchscript_device_change ( self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    @slow
    def test_inference_nezha_model ( self ) -> int:
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_nezha_masked_lm ( self ) -> List[Any]:
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
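    # Added note (sketch, not part of the original tests): outside unittest, the
    # same forward pass is simply
    #
    #     model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base").eval()
    #     with torch.no_grad():
    #         hidden = model(input_ids, attention_mask=attention_mask)[0]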
| 105 |
"""simple docstring"""
def price_plus_tax (price : float , tax_rate : float ) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
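    # Added sanity check (sketch, not in the original file): by the formula
    # above, price_plus_tax(100, 0.25) must equal 100 * 1.25 == 125.0.
    assert price_plus_tax(100, 0.25) == 125.0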
| 105 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
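# Added usage sketch (not part of the original file): with the _LazyModule shim
# above, importing this package stays cheap; the torch-backed classes are only
# materialized on first attribute access, e.g.
#
#     from transformers.models import audio_spectrogram_transformer as ast_module
#     config = ast_module.ASTConfig()  # triggers the lazy import of the config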
| 370 |
"""simple docstring"""
def is_pentagonal (n : int ) -> bool:
    """simple docstring"""
    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a whole number.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution (limit : int = 50_00 ) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
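# Added sanity sketch (not in the original solution): the closed-form test in
# is_pentagonal should accept exactly the generated values P(n) = n * (3n - 1) / 2.
def _pentagonal_sanity_check(count: int = 100) -> bool:
    pentagonal_nums = [(n * (3 * n - 1)) // 2 for n in range(1, count)]
    return all(is_pentagonal(p) for p in pentagonal_nums) and not is_pentagonal(7)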
if __name__ == "__main__":
print(F'''{solution() = }''')
| 132 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
"""simple docstring"""
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs (self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config (self ):
return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_megatron_bert_for_masked_lm (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_causal_lm (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_next_sequence_prediction (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_pretraining (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_question_answering (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_megatron_bert_for_sequence_classification (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_megatron_bert_for_token_classification (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_megatron_bert_for_multiple_choice (self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
# test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class (self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp (self ):
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
    def test_config (self ):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification (self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head (self ):
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 10_24) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 174 |
'''simple docstring'''
from math import factorial, radians
def sin( angle_in_degrees , accuracy = 1_8 , rounded_values_count = 1_0 ):
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
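# Added cross-check (sketch, not in the original file): the truncated Maclaurin
# series above should track the library sine once the angle is range-reduced.
def _sin_abs_error(angle_in_degrees: float) -> float:
    import math
    return abs(sin(angle_in_degrees) - math.sin(math.radians(angle_in_degrees)))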
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 174 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class lowerCamelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
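# Added note (sketch, not part of the original file): instantiating any of the
# placeholder classes above fails fast with an ImportError-style message that
# names the missing backends, e.g.
#
#     obj = lowerCamelCase()  # raises: this class requires torch, transformers and onnx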
| 27 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase ( nn.Module ):
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward ( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
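# Added shape sketch (hypothetical values, not from this module): with the
# defaults above, `encoder_hidden_states` carries both conditions concatenated
# along the token axis, e.g. shape (batch, 77 + 257, cross_attention_dim);
# forward() slices them apart with `condition_lengths`, runs each slice through
# the sub-transformer chosen by `transformer_index_for_condition`, and blends
# the two residuals with `mix_ratio`.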
| 27 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}' )
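# Added illustration (sketch, not part of the original file): make_batched
# normalizes the three accepted layouts to a batch of videos, i.e. a list of
# videos where each video is a list of frames.
def _make_batched_demo() -> None:
    import numpy as np
    frame = np.zeros((8, 8, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame      # single image  -> [[frame]]
    assert make_batched([frame])[0][0] is frame    # one video     -> [video]
    assert make_batched([[frame]])[0][0] is frame  # already batched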
class A ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self : List[Any],do_resize : bool = True,size : Dict[str, int] = None,resample : PILImageResampling = PILImageResampling.BILINEAR,do_center_crop : bool = True,crop_size : Dict[str, int] = None,do_rescale : bool = True,rescale_factor : Union[int, float] = 1 / 2_5_5,offset : bool = True,do_normalize : bool = True,image_mean : Optional[Union[float, List[float]]] = None,image_std : Optional[Union[float, List[float]]] = None,**kwargs : Any,)-> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 2_5_6}
        size = get_size_dict(size,default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        crop_size = get_size_dict(crop_size,param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : Any,image : np.ndarray,size : Dict[str, int],resample : PILImageResampling = PILImageResampling.BILINEAR,data_format : Optional[Union[str, ChannelDimension]] = None,**kwargs : Tuple,)-> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size,default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image,size['shortest_edge'],default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        return resize(image,size=output_size,resample=resample,data_format=data_format,**kwargs )
    def center_crop( self : str,image : np.ndarray,size : Dict[str, int],data_format : Optional[Union[str, ChannelDimension]] = None,**kwargs : Tuple,)-> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(image,size=(size['height'], size['width']),data_format=data_format,**kwargs )
    def rescale( self : Tuple,image : np.ndarray,scale : Union[int, float],offset : bool = True,data_format : Optional[Union[str, ChannelDimension]] = None,**kwargs : Tuple,)-> np.ndarray:
        '''simple docstring'''
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image,scale=scale,data_format=data_format,**kwargs )
    def normalize( self : List[Any],image : np.ndarray,mean : Union[float, List[float]],std : Union[float, List[float]],data_format : Optional[Union[str, ChannelDimension]] = None,**kwargs : List[str],)-> np.ndarray:
        '''simple docstring'''
        return normalize(image,mean=mean,std=std,data_format=data_format,**kwargs )
    def _preprocess_image( self : List[Any],image : ImageInput,do_resize : bool = None,size : Dict[str, int] = None,resample : PILImageResampling = None,do_center_crop : bool = None,crop_size : Dict[str, int] = None,do_rescale : bool = None,rescale_factor : float = None,offset : bool = None,do_normalize : bool = None,image_mean : Optional[Union[float, List[float]]] = None,image_std : Optional[Union[float, List[float]]] = None,data_format : Optional[ChannelDimension] = ChannelDimension.FIRST,)-> np.ndarray:
        '''simple docstring'''
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image,size=size,resample=resample )
        if do_center_crop:
            image = self.center_crop(image,size=crop_size )
        if do_rescale:
            image = self.rescale(image=image,scale=rescale_factor,offset=offset )
        if do_normalize:
            image = self.normalize(image=image,mean=image_mean,std=image_std )
        image = to_channel_dimension_format(image,data_format )
        return image
    def preprocess( self : Any,videos : ImageInput,do_resize : bool = None,size : Dict[str, int] = None,resample : PILImageResampling = None,do_center_crop : bool = None,crop_size : Dict[str, int] = None,do_rescale : bool = None,rescale_factor : float = None,offset : bool = None,do_normalize : bool = None,image_mean : Optional[Union[float, List[float]]] = None,image_std : Optional[Union[float, List[float]]] = None,return_tensors : Optional[Union[str, TensorType]] = None,data_format : ChannelDimension = ChannelDimension.FIRST,**kwargs : Tuple,)-> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size,default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size,param_name='crop_size' )
        if not valid_images(videos ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img,do_resize=do_resize,size=size,resample=resample,do_center_crop=do_center_crop,crop_size=crop_size,do_rescale=do_rescale,rescale_factor=rescale_factor,offset=offset,do_normalize=do_normalize,image_mean=image_mean,image_std=image_std,data_format=data_format,)
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data,tensor_type=return_tensors )
 | 7 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class a ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> List[str]:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b=None ) -> List[str]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
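    # Added usage sketch (commented; the checkpoint name is the canonical public
    # one, not defined in this file). The class above is published upstream as
    # BertTokenizerFast:
    #
    #     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    #     enc = tok("hello world")  # input_ids framed by [CLS] ... [SEP]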
| 180 | 0 |
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
    def __init__( self : Dict, rows : list[list[int]] ):
        """simple docstring"""
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns ( self : List[Any] ):
        """simple docstring"""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows ( self : Any ):
        """simple docstring"""
        return len(self.rows )
    @property
    def num_columns ( self : List[str] ):
        """simple docstring"""
        return len(self.rows[0] )
    @property
    def order ( self : List[str] ):
        """simple docstring"""
        return (self.num_rows, self.num_columns)
    @property
    def is_square ( self : str ):
        """simple docstring"""
        return self.order[0] == self.order[1]
    def identity ( self : Tuple ):
        """simple docstring"""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant ( self : List[str] ):
        """simple docstring"""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def is_invertable ( self : List[Any] ):
        """simple docstring"""
        return bool(self.determinant() )
    def get_minor ( self : Any, row : int, column : int ):
        """simple docstring"""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor ( self : int, row : int, column : int ):
        """simple docstring"""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column )
        return -1 * self.get_minor(row, column )
    def minors ( self : Dict ):
        """simple docstring"""
        return Matrix(
            [
                [self.get_minor(row, column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors ( self : Tuple ):
        """simple docstring"""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate ( self : Union[str, Any] ):
        """simple docstring"""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse ( self : Union[str, Any] ):
        """simple docstring"""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse" )
        return self.adjugate() * (1 / determinant)
    def __repr__( self : List[str] ):
        """simple docstring"""
        return str(self.rows )
    def __str__( self : int ):
        """simple docstring"""
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value ) for value in row] ) + ".]"
                    for row in self.rows
                ] )
            + "]"
        )
    def add_row ( self : int, row : list[int], position : int | None = None ):
        """simple docstring"""
        type_error = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(row, list ):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column ( self : str, column : list[int], position : int | None = None ):
        """simple docstring"""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(column, list ):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self : int, other : object ):
        """simple docstring"""
        if not isinstance(other, Matrix ):
            return NotImplemented
        return self.rows == other.rows
def __ne__( self : int, a_ : object ):
"""simple docstring"""
return not self == other
def __neg__( self : int ):
"""simple docstring"""
return self * -1
    def __add__( self : Optional[Any], other : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any, a_ : Matrix ):
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Tuple, a_ : Matrix | int | float ):
"""simple docstring"""
if isinstance(a_, (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(a_, a_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(a_, a_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
def __pow__( self : int, a_ : int ):
"""simple docstring"""
if not isinstance(a_, a_ ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
            if self.is_invertible():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertible matrices can be raised to a negative power" )
UpperCamelCase__ = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowercase_ ( cls : str, a_ : list[int], a_ : list[int] ):
"""simple docstring"""
return sum(row[i] * column[i] for i in range(len(a_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
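# The class above implements inversion via cofactor expansion, but its identifiers are
# mangled. Below is a minimal, self-contained sketch of the same adjugate / determinant
# approach with readable names; `Matrix2x2` and its interface are illustrative
# assumptions for the 2x2 case, not part of the original class.
class Matrix2x2:
    def __init__(self, rows: list[list[float]]) -> None:
        self.rows = rows  # expects exactly two rows of two numbers each

    def determinant(self) -> float:
        (a, b), (c, d) = self.rows
        return a * d - b * c

    def inverse(self) -> "Matrix2x2":
        det = self.determinant()
        if not det:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        (a, b), (c, d) = self.rows
        # adjugate (swap diagonal, negate off-diagonal) divided by the determinant
        return Matrix2x2([[d / det, -b / det], [-c / det, a / det]])

if __name__ == "__main__":
    print(Matrix2x2([[4, 7], [2, 6]]).inverse().rows)  # [[0.6, -0.7], [-0.2, 0.4]]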
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
        # Take the columns in reverse order (-1), then keep only the first `dimensions` columns
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
    # Assert that the function raises an AssertionError when dimensions >= classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
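# A minimal sketch of what the PCA routine above computes, using plain numpy.
# Note the convention from the code: features are rows and samples are columns
# (hence `.mean(1)` and `features.shape[1]`). The toy data below is illustrative.
import numpy as np

features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])  # 2 features x 4 samples
centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance)  # eigenvalues in ascending order
top = eigenvectors[:, ::-1][:, :1]  # keep the leading principal direction
projected = top.T @ features
print(projected.shape)  # (1, 4): one component per sample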
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = [[] for _ in range(UpperCAmelCase )]
a_ = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(UpperCAmelCase ) <= key:
return input_string
for position, character in enumerate(UpperCAmelCase ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(UpperCAmelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(UpperCAmelCase )
a_ = ["".join(UpperCAmelCase ) for row in temp_grid]
a_ = "".join(UpperCAmelCase )
return output_string
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = []
a_ = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
a_ = [[] for _ in range(UpperCAmelCase )] # generates template
for position in range(len(UpperCAmelCase ) ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(UpperCAmelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
a_ = 0
for row in temp_grid: # fills in the characters
a_ = input_string[counter : counter + len(UpperCAmelCase )]
grid.append(list(UpperCAmelCase ) )
counter += len(UpperCAmelCase )
a_ = "" # reads as zigzag
for position in range(len(UpperCAmelCase ) ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(UpperCAmelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCamelCase ( UpperCAmelCase ) ->dict[int, str]:
"""simple docstring"""
a_ = {}
for key_guess in range(1 , len(UpperCAmelCase ) ): # tries every key
        a_[key_guess] = decrypt(UpperCAmelCase , key_guess )
return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 243 |
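# Round-trip sketch for the rail-fence (zigzag) cipher above. The three routines
# share a mangled name in the snippet, so this standalone `encrypt` with readable
# names restates the same zigzag indexing; the function name is an assumption.
def encrypt(input_string: str, key: int) -> str:
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    lowest = key - 1
    rails: list[list[str]] = [[] for _ in range(key)]
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)     # put the index in bounds
        num = min(num, lowest * 2 - num)  # reflect to create the zigzag pattern
        rails[num].append(character)
    return "".join("".join(rail) for rail in rails)

print(encrypt("WEAREDISCOVEREDFLEEATONCE", 3))  # WECRLTEERDSOEEFEAOCAIVDEN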
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = GPTSwaTokenizer
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
a = GPTSwaTokenizer(__lowerCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Any:
a = "This is a test"
a = "This is a test"
return input_text, output_text
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = "<s>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__lowerCamelCase ) , 20_00 )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
a = GPTSwaTokenizer(__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
__lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
# fmt: off
self.assertListEqual(
__lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def __UpperCAmelCase ( self : List[Any] ) -> str:
a = GPTSwaTokenizer(__lowerCamelCase )
a = ["This is a test", "I was born in 92000, and this is falsé."]
a = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
| 107 | 0 |
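# Usage sketch outside the test fixtures above: load the public GPT-SW3 checkpoint
# referenced in the integration test and round-trip a sentence. Assumes network
# access and that `AutoTokenizer` resolves the SentencePiece tokenizer for this repo.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
print(ids)
print(tokenizer.decode(ids))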
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''visual_bert'''
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=5_12 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> int:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =visual_embedding_dim
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =initializer_range
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =bypass_transformer
_lowerCAmelCase =special_visual_initialize
| 341 |
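# Minimal sketch: instantiating the configuration above with a custom visual
# embedding size. `VisualBertConfig` is the public transformers name of the
# mangled class; the chosen dimension is only an example.
from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=1024)
print(config.visual_embedding_dim, config.hidden_size)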
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 341 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: Optional[int]=13 , __lowerCamelCase: str=[30, 30] , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Dict=True , __lowerCamelCase: List[Any]=32 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: Optional[Any]=4 , __lowerCamelCase: int=37 , __lowerCamelCase: List[str]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: int=0.1 , __lowerCamelCase: List[str]=10 , __lowerCamelCase: int=0.02 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: List[Any]=None , __lowerCamelCase: List[str]=8 , __lowerCamelCase: Any=10 , ) -> Dict:
__UpperCAmelCase : int = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : int = is_training
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : int = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : List[str] = scope
__UpperCAmelCase : List[Any] = n_targets
__UpperCAmelCase : Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__UpperCAmelCase : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__UpperCAmelCase : List[str] = num_patches + 1 + self.num_detection_tokens
def _lowerCamelCase ( self: Tuple ) -> List[str]:
__UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__UpperCAmelCase : Union[str, Any] = []
for i in range(self.batch_size ):
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Dict = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__lowerCamelCase )
__UpperCAmelCase : Dict = torch.rand(self.n_targets , 4 , device=__lowerCamelCase )
labels.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Optional[Any] ) -> int:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] ) -> Any:
__UpperCAmelCase : Optional[int] = YolosModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
__UpperCAmelCase : Dict = YolosForObjectDetection(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(pixel_values=__lowerCamelCase )
__UpperCAmelCase : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__UpperCAmelCase : Optional[int] = model(pixel_values=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _lowerCamelCase ( self: Optional[Any] ) -> str:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs
__UpperCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[Any] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__: Union[str, Any] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Optional[int]=False ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__UpperCAmelCase : List[Any] = []
for i in range(self.model_tester.batch_size ):
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__lowerCamelCase , dtype=torch.long )
__UpperCAmelCase : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=__lowerCamelCase , dtype=torch.float )
labels.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = labels
return inputs_dict
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
__UpperCAmelCase : Any = YolosModelTester(self )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> int:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: Any ) -> Any:
# YOLOS does not use inputs_embeds
pass
def _lowerCamelCase ( self: Tuple ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = True
# in YOLOS, the seq_len is different
__UpperCAmelCase : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = True
__UpperCAmelCase : int = False
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : str = True
__UpperCAmelCase : str = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCAmelCase : str = len(__lowerCamelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Dict ):
__UpperCAmelCase : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Optional[Any] = outputs.hidden_states
__UpperCAmelCase : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# YOLOS has a different seq_length
__UpperCAmelCase : Any = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Tuple ) -> int:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Dict = YolosModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> Optional[int]:
__UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: int ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : int = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__lowerCamelCase )
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(inputs.pixel_values )
# verify outputs
__UpperCAmelCase : Optional[Any] = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=__lowerCamelCase , )
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify postprocessing
__UpperCAmelCase : List[str] = image_processor.post_process_object_detection(
__lowerCamelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__UpperCAmelCase : Union[str, Any] = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(__lowerCamelCase )
__UpperCAmelCase : Any = [75, 75, 17, 63, 17]
__UpperCAmelCase : List[Any] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(__lowerCamelCase )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , __lowerCamelCase , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , __lowerCamelCase )
self.assertTrue(torch.allclose(results["boxes"][0, :] , __lowerCamelCase ) )
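# Inference sketch mirroring the slow integration test above: run the public
# YOLOS-small checkpoint on an image and post-process the detections. Assumes
# network access and an image at the fixture path the tests use.
from PIL import Image
import torch
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
print(results["labels"], results["scores"])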
| 157 | def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : int = ""
for word_or_phrase in separated:
if not isinstance(snake_case__, snake_case__ ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(snake_case__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 157 | 1 |
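# Readable restatement of the custom join above (the names are assumptions, since
# the snippet's identifiers are mangled): concatenate each string with the
# separator, then strip the trailing separator.
def join(separator: str, separated: list[str]) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)

print(join("-", ["a", "b", "c"]))  # a-b-c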
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__a : Tuple = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) )
self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__a : Any = get_activation('gelu' )
__a : List[str] = get_activation('gelu_10' )
__a : Tuple = torch_builtin(__a )
__a : Any = geluaa(__a )
__a : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__a ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(__a ):
get_activation('bogus' )
with self.assertRaises(__a ):
get_activation(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = get_activation('gelu' )
__a : List[str] = 1
__a : str = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__a ):
__a : List[str] = acta.a
| 364 |
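# Quick sketch of the clipping behaviour tested above: "gelu_10" matches plain
# GELU but caps activations at 10. Requires transformers with torch installed;
# the sample points are arbitrary.
import torch
from transformers.activations import get_activation

x = torch.linspace(-5.0, 20.0, 6)
print(get_activation("gelu")(x))
print(get_activation("gelu_10")(x))  # identical below 10, clipped above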
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = None
A_ = None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ):
# Validation
def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
_SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
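# Usage sketch for the validator above, with plain names standing in for the
# mangled dataclass fields (data/left/right are the intended meanings of the
# three fields). Builds one valid and one invalid tree.
from dataclasses import dataclass

@dataclass
class Node:
    data: float
    left: "Node | None" = None
    right: "Node | None" = None

def is_bst(node: "Node | None", lo: float = float("-inf"), hi: float = float("inf")) -> bool:
    # every node must fall strictly inside the (lo, hi) bound of its ancestors
    if node is None:
        return True
    return lo < node.data < hi and is_bst(node.left, lo, node.data) and is_bst(node.right, node.data, hi)

print(is_bst(Node(2, Node(1), Node(3))))  # True
print(is_bst(Node(2, Node(3), Node(1))))  # False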
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCAmelCase : Dict = datasets.utils.logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = ["names", "prefix"]
UpperCAmelCase : Tuple = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
UpperCAmelCase : Union[str, Any] = ["encoding_errors", "on_bad_lines"]
UpperCAmelCase : Optional[Any] = ["date_format"]
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
lowercase__ = ","
lowercase__ = None
lowercase__ = "infer"
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = "."
lowercase__ = None
lowercase__ = '"'
lowercase__ = 0
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = True
lowercase__ = True
lowercase__ = 0
lowercase__ = True
lowercase__ = False
lowercase__ = None
lowercase__ = 10000
lowercase__ = None
lowercase__ = "strict"
lowercase__ = "error"
lowercase__ = None
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
if self.delimiter is not None:
lowercase_ = self.delimiter
if self.column_names is not None:
lowercase_ = self.column_names
@property
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase_):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
lowercase__ = CsvConfig
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''')
lowercase_ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowerCAmelCase_ , (str, list, tuple)):
lowercase_ = data_files
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = [files]
lowercase_ = [dl_manager.iter_files(lowerCAmelCase_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})]
lowercase_ = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
lowercase_ = [files]
lowercase_ = [dl_manager.iter_files(lowerCAmelCase_) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase_ , gen_kwargs={"""files""": files}))
return splits
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : pa.Table):
"""simple docstring"""
if self.config.features is not None:
lowercase_ = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase_) for feature in self.config.features.values()):
# cheaper cast
lowercase_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase_)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowercase_ = table_cast(lowerCAmelCase_ , lowerCAmelCase_)
return pa_table
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowercase_ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase_) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase_)):
lowercase_ = pd.read_csv(lowerCAmelCase_ , iterator=lowerCAmelCase_ , dtype=lowerCAmelCase_ , **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(lowerCAmelCase_):
lowercase_ = pa.Table.from_pandas(lowerCAmelCase_)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase_)
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase_)}: {e}''')
raise
| 136 |
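# End-user sketch: the builder above is what powers `load_dataset("csv", ...)`.
# The file path is a placeholder; any local CSV with a header row works, and the
# pandas-style kwargs (sep, names, skiprows, ...) map onto CsvConfig.
from datasets import load_dataset

dataset = load_dataset("csv", data_files="my_table.csv", sep=",")
print(dataset["train"].features)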
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
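# --- Illustrative sketch (not part of the original file) ---
# Tiny worked example of the histogram-based area computation above, with
# num_labels=3 and no ignored pixels. The arrays are made up for the demo.
_pred = np.array([0, 0, 1, 2])
_gt = np.array([0, 1, 1, 2])
_intersect = _pred[_pred == _gt]                                        # [0, 1, 2]
_area_intersect = np.histogram(_intersect, bins=3, range=(0, 2))[0]     # [1, 1, 1]
_area_pred = np.histogram(_pred, bins=3, range=(0, 2))[0]               # [2, 1, 1]
_area_gt = np.histogram(_gt, bins=3, range=(0, 2))[0]                   # [1, 2, 1]
_area_union = _area_pred + _area_gt - _area_intersect                   # [2, 2, 1]
assert list(_area_intersect / _area_union) == [0.5, 0.5, 1.0]           # per-class IoU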
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 136 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between a line beginning with `start_prompt` and one beginning with `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as markdown links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
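# --- Illustrative sketch (not part of the original file) ---
# http_user_agent builds a "key/value; key/value" string; the dict below is
# an arbitrary example payload. Exact backend versions are environment-
# specific, so only the stable prefix/suffix are checked here.
_ua = http_user_agent({"pipeline": "text-to-image"})
assert _ua.startswith(f"diffusers/{__version__}; python/")
assert _ua.endswith("pipeline/text-to-image") or _ua.endswith("telemetry/off")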
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
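# --- Illustrative sketch (not part of the original file) ---
# The regex above pulls the 40-character snapshot folder name out of a
# resolved cache path. The path and hash below are fabricated for the demo.
_demo_path = (
    "/home/user/.cache/huggingface/diffusers/models--runwayml--stable-diffusion-v1-5/"
    "snapshots/aa9ba505e1973ae5cd05f5aedd345178f52f8e6a/unet/config.json"
)
assert extract_commit_hash(_demo_path) == "aa9ba505e1973ae5cd05f5aedd345178f52f8e6a"
assert extract_commit_hash(None) is None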
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
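# --- Illustrative sketch (not part of the original file) ---
# The variant is spliced in just before the file extension:
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("model.safetensors", "non_ema") == "model.non_ema.safetensors"
assert _add_variant("model.bin", None) == "model.bin"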
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 298 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
# 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 235 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a__ = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
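# --- Illustrative sketch (not part of the original file) ---
# The regex rewrites "name.<digits>" segments into "name_<digits>", which is
# how PyTorch ModuleList indices map onto Flax collection names:
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"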
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping the tensor when needed."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
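# --- Illustrative sketch (not part of the original file) ---
# The two reshape rules above, shown on toy numpy arrays: 4D conv weights go
# from PyTorch (out, in, h, w) to Flax (h, w, in, out); 2D linear weights are
# transposed. The empty flax state dict means the "scale" and "embedding"
# branches never fire.
import numpy as np

_conv_key, _conv = rename_key_and_reshape_tensor(("conv", "weight"), np.zeros((8, 3, 5, 5)), {})
assert _conv_key == ("conv", "kernel") and _conv.shape == (5, 5, 3, 8)

_lin_key, _lin = rename_key_and_reshape_tensor(("dense", "weight"), np.zeros((16, 32)), {})
assert _lin_key == ("dense", "kernel") and _lin.shape == (32, 16)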
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 235 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
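# --- Illustrative sketch (not part of the original script) ---
# How a flattened t5x key is split into (real layer name, sub-key, content).
# The layer names and checkpoint path below are made up for the demo.
_info = {"target/encoder/layer_0/kernel/kvstore/path": "ts/enc0.kernel"}
_name, _split, _content = get_key_and_tensorstore_dict(
    "target/encoder/layer_0/kernel/kvstore/path", _info, "/ckpt/step_1000"
)
assert _name == "target/encoder/layer_0/kernel"
assert _content == "/ckpt/step_1000/ts/enc0.kernel"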
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
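# --- Illustrative sketch (not part of the original script) ---
# The sharding decision above is pure size bookkeeping: a weight joins the
# current shard unless it would push the shard past max_shard_size. A
# slightly simplified toy version (it also guards against emitting an empty
# shard) with fake (name, n_bytes) pairs:
def _demo_pack_shards(weights, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, n_bytes in weights:
        if current_size + n_bytes > max_shard_size and current:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += n_bytes
    shards.append(current)
    return shards

assert _demo_pack_shards([("a", 6), ("b", 5), ("c", 4)], 10) == [["a"], ["b", "c"]]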
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
UpperCamelCase : Union[str, Any] = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 345 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
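# --- Illustrative sketch (not part of the original file) ---
# Quick check of the 6k +/- 1 trial division above:
assert [n for n in range(2, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert not is_prime(25)  # caught by the i = 5 iteration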
def prime_generator():
    """Yields the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width: the shortest edge is scaled to `size`, the result is capped at
        int(1333 / 800 * size), and both sides are floored to a multiple of `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
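# --- Illustrative sketch (not part of the original test file) ---
# The expected-size math above, traced by hand for a 60x40 (h x w) image with
# shortest_edge=288 and size_divisor=32: the short side (w=40) is scaled to
# 288, the result stays under the cap int(1333 / 800 * 288) = 479, and both
# sides are floored to a multiple of 32.
def _demo_expected_size(h, w, size=288, size_divisor=32):
    scale = size / min(w, h)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert _demo_expected_size(60, 40) == (416, 288)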
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
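# --- Illustrative sketch (not part of the original file) ---
# What get_overflowing_images does, reduced to plain lists: when one long
# document overflows into several tokenizer samples, the same page image is
# repeated once per resulting sample. The file names are demo placeholders.
_images = ["page0.png", "page1.png"]
_overflow_to_sample_mapping = [0, 0, 1]  # sample 0 overflowed into two chunks
assert [_images[i] for i in _overflow_to_sample_mapping] == ["page0.png", "page0.png", "page1.png"]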
| 78 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
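# --- Illustrative sketch (not part of the original file) ---
# The idea behind _LazyModule, reduced to a minimal stand-in: the real
# submodule import is deferred until an attribute is first accessed, so
# importing the package itself stays cheap. This toy class only mimics the
# pattern; it is not the transformers implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)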
| 78 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCamelCase ( BackboneConfigMixin , PretrainedConfig):
"""simple docstring"""
model_type = "nat"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths )
self.num_heads = num_heads
self.kernel_size = kernel_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
self.layer_scale_init_value = layer_scale_init_value
self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
self._out_features , self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 39 |
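A quick arithmetic check of the `hidden_size` computation in the Nat config above, using its default values (a sketch, not part of the original file):

embed_dim = 64
depths = [3, 4, 6, 5]                                  # 4 stages
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 64 * 2**3 = 512
assert hidden_size == 512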
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict (TypedDict):
'''simple docstring'''
bwt_string :str
idx_original_string :int
def all_rotations ( s ) -> list[str]:
if not isinstance(s , str ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform ( s ) -> BWTTransformDict:
if not isinstance(s , str ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
rotations = all_rotations(s )
rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
response : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(s ),
}
return response
def reverse_bwt ( bwt_string , idx_original_string ) -> str:
if not isinstance(bwt_string , str ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
idx_original_string = int(idx_original_string )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or"""
""" castable to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(bwt_string ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
ordered_rotations = [""] * len(bwt_string )
for _ in range(len(bwt_string ) ):
for i in range(len(bwt_string ) ):
ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a :Union[str, Any] = "Provide a string that I will generate its BWT transform: "
a :str = input(entry_msg).strip()
a :int = bwt_transform(s)
print(
f'Burrows Wheeler transform for string \'{s}\' results '
f'in \'{result["bwt_string"]}\''
)
original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
f'we get original string \'{original_string}\''
)
| 132 | 0 |
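A worked run of the BWT helpers above, with the values computed by hand (assumes `bwt_transform` and `reverse_bwt` are in scope):

# The 6 rotations of "banana", sorted: abanan, anaban, ananab, banana, nabana, nanaba.
# The transform is the last character of each sorted rotation.
result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3   # position of "banana" in the sorted rotations
assert reverse_bwt("nnbaaa", 3) == "banana"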
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict ( state_dict ):
'''simple docstring'''
state_dict_keys = list(state_dict.keys() )
for name in state_dict_keys:
weight = state_dict.pop(name )
# emb -> embedding
if name.startswith('emb.' ):
name = name.replace('emb.' ,'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
name = name.replace('blocks.0.ln0' ,'blocks.0.pre_ln' )
# att -> attention
name = re.sub(r'blocks\.(\d+)\.att' ,r'blocks.\1.attention' ,name )
# ffn -> feed_forward
name = re.sub(r'blocks\.(\d+)\.ffn' ,r'blocks.\1.feed_forward' ,name )
# time_mix_k -> time_mix_key
if name.endswith('.time_mix_k' ):
name = name.replace('.time_mix_k' ,'.time_mix_key' )
# time_mix_v -> time_mix_value
if name.endswith('.time_mix_v' ):
name = name.replace('.time_mix_v' ,'.time_mix_value' )
# time_mix_r -> time_mix_receptance
if name.endswith('.time_mix_r' ):
name = name.replace('.time_mix_r' ,'.time_mix_receptance' )
if name != "head.weight":
name = """rwkv.""" + name
state_dict[name] = weight
return state_dict
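# A hand-traced spot check of the renaming rules above (not part of the original
# script): the raw key "blocks.3.att.time_mix_k" first becomes
# "blocks.3.attention.time_mix_k" via the att -> attention regex, then
# "blocks.3.attention.time_mix_key" via the suffix rule, and finally
# "rwkv.blocks.3.attention.time_mix_key" once the "rwkv." prefix is added.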
def convert_rmkv_checkpoint_to_hf_format ( repo_id ,checkpoint_file ,output_dir ,size=None ,tokenizer_file=None ,push_to_hub=False ,model_name=None ):
'''simple docstring'''
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
vocab_size = 5_02_77
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
vocab_size = len(tokenizer )
tokenizer.save_pretrained(output_dir )
# 2. Build the config
possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
size = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
config = RwkvConfig(
vocab_size=vocab_size ,num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] ,hidden_size=HIDEN_SIZE_MAPPING[size] ,)
config.save_pretrained(output_dir )
# 3. Download model file then convert state_dict
model_file = hf_hub_download(repo_id ,checkpoint_file )
state_dict = torch.load(model_file ,map_location='cpu' )
state_dict = convert_state_dict(state_dict )
# 4. Split in shards and save
shards ,index = shard_checkpoint(state_dict )
for shard_file, shard in shards.items():
torch.save(shard ,os.path.join(output_dir ,shard_file ) )
if index is not None:
index_file = os.path.join(output_dir ,WEIGHTS_INDEX_NAME )
# Save the index as well
with open(index_file ,'w' ,encoding='utf-8' ) as f:
content = json.dumps(index ,indent=2 ,sort_keys=True ) + """\n"""
f.write(content )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
print(
'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
shard_files = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
state_dict = torch.load(os.path.join(output_dir ,shard_file ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} ,os.path.join(output_dir ,shard_file ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
model = AutoModelForCausalLM.from_pretrained(output_dir )
model.push_to_hub(model_name ,max_shard_size='2GB' )
tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 358 | import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
model_type = '''mask2former'''
backbones_supported = ['''swin''']
attribute_map = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , backbone_config = None , feature_size = 2_5_6 , mask_feature_size = 2_5_6 , hidden_dim = 2_5_6 , encoder_feedforward_dim = 1_0_2_4 , activation_function = "relu" , encoder_layers = 6 , decoder_layers = 1_0 , num_attention_heads = 8 , dropout = 0.0 , dim_feedforward = 2_0_4_8 , pre_norm = False , enforce_input_projection = False , common_stride = 4 , ignore_value = 2_5_5 , num_queries = 1_0_0 , no_object_weight = 0.1 , class_weight = 2.0 , mask_weight = 5.0 , dice_weight = 5.0 , train_num_points = 1_2_5_4_4 , oversample_ratio = 3.0 , importance_sample_ratio = 0.75 , init_std = 0.02 , init_xavier_std = 1.0 , use_auxiliary_loss = True , feature_strides = [4, 8, 1_6, 3_2] , output_auxiliary_logits = None , **kwargs , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
backbone_config = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.pop('model_type' )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
self.backbone_config = backbone_config
self.feature_size = feature_size
self.mask_feature_size = mask_feature_size
self.hidden_dim = hidden_dim
self.encoder_feedforward_dim = encoder_feedforward_dim
self.activation_function = activation_function
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.dim_feedforward = dim_feedforward
self.pre_norm = pre_norm
self.enforce_input_projection = enforce_input_projection
self.common_stride = common_stride
self.ignore_value = ignore_value
self.num_queries = num_queries
self.no_object_weight = no_object_weight
self.class_weight = class_weight
self.mask_weight = mask_weight
self.dice_weight = dice_weight
self.train_num_points = train_num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.use_auxiliary_loss = use_auxiliary_loss
self.feature_strides = feature_strides
self.output_auxiliary_logits = output_auxiliary_logits
self.num_hidden_layers = decoder_layers
super().__init__(**kwargs )
@classmethod
def from_backbone_config ( cls , backbone_config , **kwargs ):
"""simple docstring"""
return cls(
backbone_config=backbone_config , **kwargs , )
def to_dict ( self ):
"""simple docstring"""
output = copy.deepcopy(self.__dict__ )
output['''backbone_config'''] = self.backbone_config.to_dict()
output['''model_type'''] = self.__class__.model_type
return output
| 192 | 0 |
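The `backbone_config` handling in the Mask2Former config above accepts either a config object or a plain dict; the dict path, isolated as a sketch (values are illustrative, dispatch logic as in the snippet):

backbone_config = {"model_type": "swin", "image_size": 224, "embed_dim": 96}
backbone_model_type = backbone_config.pop("model_type")      # "swin"
config_class = CONFIG_MAPPING[backbone_model_type]           # resolves to the Swin config class
backbone_config = config_class.from_dict(backbone_config)    # dict -> config object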
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 5_0257 , n_positions = 1024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1E-5 , initializer_range = 0.02 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ):
'''simple docstring'''
super().__init__()
self.prefix_length = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
self.prefix_inner_dim = prefix_inner_dim
self.prefix_hidden_dim = prefix_hidden_dim
self.encode_prefix = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
self.decode_prefix = (
nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
)
gpt_config = GPTaConfig(
vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
self.transformer = GPTaLMHeadModel(gpt_config )
def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
'''simple docstring'''
embedding_text = self.transformer.transformer.wte(input_ids )
hidden = self.encode_prefix(prefix_embeds )
prefix_embeds = self.decode_prefix(hidden )
embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
labels = torch.cat((dummy_token, input_ids) , dim=1 )
out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def get_dummy_token( self , batch_size , device ):
'''simple docstring'''
return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
def encode( self , prefix ):
'''simple docstring'''
return self.encode_prefix(prefix )
@torch.no_grad()
def generate_captions( self , features , eos_token_id , device ):
'''simple docstring'''
features = torch.split(features , 1 , dim=0 )
generated_tokens = []
generated_seq_lengths = []
for feature in features:
feature = self.decode_prefix(feature.to(device ) ) # back to the clip feature
# Only support beam search for now
output_tokens , seq_lengths = self.generate_beam(
input_embeds=feature , device=device , eos_token_id=eos_token_id )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
generated_tokens = torch.stack(generated_tokens )
generated_seq_lengths = torch.stack(generated_seq_lengths )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
'''simple docstring'''
stop_token_index = eos_token_id
tokens = None
scores = None
seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
if input_embeds is not None:
generated = input_embeds
else:
generated = self.transformer.transformer.wte(input_ids )
for i in range(entry_length ):
outputs = self.transformer(inputs_embeds=generated )
logits = outputs.logits
logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
logits = logits.softmax(-1 ).log()
if scores is None:
scores , next_tokens = logits.topk(beam_size , -1 )
generated = generated.expand(beam_size , *generated.shape[1:] )
next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
tokens = next_tokens
else:
tokens = tokens.expand(beam_size , *tokens.shape[1:] )
tokens = torch.cat((tokens, next_tokens) , dim=1 )
else:
logits[is_stopped] = -float(np.inf )
logits[is_stopped, 0] = 0
scores_sum = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
scores_sum_average = scores_sum / seq_lengths[:, None]
scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
next_tokens_source = next_tokens // scores_sum.shape[1]
seq_lengths = seq_lengths[next_tokens_source]
next_tokens = next_tokens % scores_sum.shape[1]
next_tokens = next_tokens.unsqueeze(1 )
tokens = tokens[next_tokens_source]
tokens = torch.cat((tokens, next_tokens) , dim=1 )
generated = generated[next_tokens_source]
scores = scores_sum_average * seq_lengths
is_stopped = is_stopped[next_tokens_source]
next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
generated = torch.cat((generated, next_token_embed) , dim=1 )
is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
if is_stopped.all():
break
scores = scores / seq_lengths
order = scores.argsort(descending=True )
# tokens tensors are already padded to max_seq_length
output_texts = [tokens[i] for i in order]
output_texts = torch.stack(output_texts , dim=0 )
seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 27 |
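One step of the length-normalized candidate selection inside `generate_beam` above, isolated as a runnable sketch (shapes and values are illustrative, not from the original):

import torch

log_probs = torch.log_softmax(torch.randn(3, 5), dim=-1)  # 3 beams, 5-token vocab
beam_scores = torch.tensor([-1.2, -0.7, -2.3])            # cumulative log-probs so far
seq_lengths = torch.tensor([4.0, 4.0, 3.0])               # current lengths per beam

scores_sum = beam_scores[:, None] + log_probs             # (3, 5) candidate totals
scores_avg = scores_sum / (seq_lengths[:, None] + 1)      # normalize by the new length
top_scores, flat_idx = scores_avg.view(-1).topk(3)
beam_source = flat_idx // scores_sum.shape[1]             # which beam each pick extends
next_token = flat_idx % scores_sum.shape[1]               # which token it appends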
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq (number : int ) -> bool:
sq = int(number**0.5 )
return number == sq * sq
def add_three (x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ) -> tuple[int, int]:
top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
bottom = x_den * y_den * z_den
hcf = gcd(top , bottom )
top //= hcf
bottom //= hcf
return top, bottom
def solution (order : int = 35 ) -> int:
unique_s : set = set()
hcf : int
total : Fraction = Fraction(0 )
fraction_sum : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
z_num = x_num * y_den + x_den * y_num
z_den = x_den * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=2
z_num = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
z_den = x_den * x_den * y_den * y_den
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-1
z_num = x_num * y_num
z_den = x_den * y_num + x_num * y_den
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
# n=-2
z_num = x_num * x_num * y_num * y_num
z_den = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(z_num ) and is_sq(z_den ):
z_num = int(sqrt(z_num ) )
z_den = int(sqrt(z_den ) )
hcf = gcd(z_num , z_den )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
fraction_sum = add_three(
x_num , x_den , y_num , y_den , z_num , z_den )
unique_s.add(fraction_sum )
for num, den in unique_s:
total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 27 | 1 |
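A hand check of `add_three` above with x = 1/2, y = 1/3 and their n = 1 sum z = 5/6 (assumes the function is in scope):

from fractions import Fraction

# top = 1*3*6 + 1*2*6 + 5*2*3 = 60, bottom = 2*3*6 = 36, gcd = 12
assert add_three(1, 2, 1, 3, 5, 6) == (5, 3)
assert Fraction(1, 2) + Fraction(1, 3) + Fraction(5, 6) == Fraction(5, 3)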
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
features = None
def _generate_iterable_examples ( df , partition_order , )-> List[Any]:
import pyspark
def generate_fn():
df_with_partition_id = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
partition_df = df_with_partition_id.select("""*""" ).where(F"part_id = {partition_id}" ).drop("""part_id""" )
rows = partition_df.collect()
row_id = 0
for row in rows:
yield F"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
def __init__( self , df , partition_order=None , ) -> List[str]:
"""simple docstring"""
self.df = df
self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> List[str]:
"""simple docstring"""
yield from self.generate_examples_fn()
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df , partition_order=_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
"""simple docstring"""
UpperCamelCase = self.split_shard_indices_by_worker(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return SparkExamplesIterable(self.df , partition_order=_SCREAMING_SNAKE_CASE )
@property
def n_shards( self ) -> int:
"""simple docstring"""
return len(self.partition_order )
class Spark ( datasets.DatasetBuilder ):
BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self , df , cache_dir = None , working_dir = None , **kwargs , ) -> Optional[Any]:
"""simple docstring"""
import pyspark
self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
self.df = df
self._working_dir = working_dir
super().__init__(
cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **kwargs , )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
def create_cache_and_write_probe(_SCREAMING_SNAKE_CASE ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_SCREAMING_SNAKE_CASE )
UpperCamelCase = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_SCREAMING_SNAKE_CASE , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_SCREAMING_SNAKE_CASE ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def A__ ( self ) -> str:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
import pyspark
def get_arrow_batch_size(_SCREAMING_SNAKE_CASE ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
UpperCamelCase = self.df.count()
UpperCamelCase = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase = (
self.df.limit(_SCREAMING_SNAKE_CASE )
.repartition(1 )
.mapInArrow(_SCREAMING_SNAKE_CASE , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase = min(_SCREAMING_SNAKE_CASE , int(approx_total_size / max_shard_size ) )
UpperCamelCase = self.df.repartition(_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
"""simple docstring"""
import pyspark
UpperCamelCase = ParquetWriter if file_format == """parquet""" else ArrowWriter
UpperCamelCase = os.path.join(self._working_dir , os.path.basename(_SCREAMING_SNAKE_CASE ) ) if self._working_dir else fpath
UpperCamelCase = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase = self.config.features
UpperCamelCase = self._writer_batch_size
UpperCamelCase = self._fs.storage_options
def write_arrow(_SCREAMING_SNAKE_CASE ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase = pyspark.TaskContext().taskAttemptId()
UpperCamelCase = next(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
UpperCamelCase = 0
UpperCamelCase = writer_class(
features=_SCREAMING_SNAKE_CASE , path=working_fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , writer_batch_size=_SCREAMING_SNAKE_CASE , storage_options=_SCREAMING_SNAKE_CASE , embed_local_files=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = pa.Table.from_batches([first_batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase ,UpperCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
UpperCamelCase = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , writer_batch_size=_SCREAMING_SNAKE_CASE , storage_options=_SCREAMING_SNAKE_CASE , embed_local_files=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = pa.Table.from_batches([batch] )
writer.write_table(_SCREAMING_SNAKE_CASE )
if writer._num_bytes > 0:
UpperCamelCase ,UpperCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) , os.path.basename(_SCREAMING_SNAKE_CASE ) )
shutil.move(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = (
self.df.mapInArrow(_SCREAMING_SNAKE_CASE , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "arrow" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
self._validate_cache_dir()
UpperCamelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_SCREAMING_SNAKE_CASE )
UpperCamelCase = not is_remote_filesystem(self._fs )
UpperCamelCase = os.path.join if is_local else posixpath.join
UpperCamelCase = """-TTTTT-SSSSS-of-NNNNN"""
UpperCamelCase = F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
UpperCamelCase = path_join(self._output_dir , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = []
UpperCamelCase = []
for task_id, content in self._prepare_split_single(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
(
(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,(
UpperCamelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_SCREAMING_SNAKE_CASE )
UpperCamelCase = total_num_examples
UpperCamelCase = total_num_bytes
# should rename everything at the end
logger.debug(F"Renaming {total_shards} shards." )
if total_shards > 1:
UpperCamelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
rename(
_SCREAMING_SNAKE_CASE , fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , fpath.replace("""TTTTT-SSSSS""" , F"{global_shard_id:05d}" ).replace("""NNNNN""" , F"{total_shards:05d}" ) , )
UpperCamelCase = []
UpperCamelCase = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase ,UpperCamelCase = task_id_and_num_shards[i]
for shard_id in range(_SCREAMING_SNAKE_CASE ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ).map(lambda _SCREAMING_SNAKE_CASE : _rename_shard(*_SCREAMING_SNAKE_CASE ) ).collect()
else:
# don't use any pattern
UpperCamelCase = 0
UpperCamelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , fpath.replace(_SCREAMING_SNAKE_CASE , """""" ) , )
def A__ ( self , _SCREAMING_SNAKE_CASE , ) -> SparkExamplesIterable:
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 363 |
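The rename pass in the split-preparation code above assigns consecutive global shard ids across tasks; the enumeration on its own, with illustrative task and shard counts:

task_id_and_num_shards = [(7, 2), (3, 1), (9, 3)]   # (task_id, num_shards) per Spark task
args, global_shard_id = [], 0
for task_id, num_shards in task_id_and_num_shards:
    for shard_id in range(num_shards):
        args.append((task_id, shard_id, global_shard_id))
        global_shard_id += 1
assert args == [(7, 0, 0), (7, 1, 1), (3, 0, 2), (9, 0, 3), (9, 1, 4), (9, 2, 5)]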
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x )-> Dict: # picklable for multiprocessing
return x.sum()
def add_one ( i )-> Tuple: # picklable for multiprocessing
return i + 1
@dataclass
class A :
x : int
y : str
class PyUtilsTest ( TestCase ):
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {"""a""": 2, """b""": 3}
UpperCamelCase = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 2
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
UpperCamelCase = {"""a""": 2, """b""": 0, """c""": 2}
UpperCamelCase = {
"""a""": np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda _SCREAMING_SNAKE_CASE : x + 1 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": 3, """b""": 4}
UpperCamelCase = {"""a""": 5, """b""": 6}
UpperCamelCase = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[str]:
"""simple docstring"""
class Foo :
my_attr = """bar"""
foo = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(foo , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc ( iterable_length , num_proc , expected_num_proc )-> List[str]:
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
data_struct = {F"{i}": i for i in range(iterable_length )}
mapped = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
@require_tf
def A__ ( self ) -> Any:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
model = layers.Dense(2 )
def gen_random_output():
x = tf.random.uniform((1, 3) )
return model(x ).numpy()
with temp_seed(42 , set_tensorflow=True ):
out1 = gen_random_output()
with temp_seed(42 , set_tensorflow=True ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def A__ ( self ) -> int:
"""simple docstring"""
import torch
def gen_random_output():
model = torch.nn.Linear(3 , 2 )
x = torch.rand(1 , 3 )
return model(x ).detach().numpy()
with temp_seed(42 , set_pytorch=True ):
out1 = gen_random_output()
with temp_seed(42 , set_pytorch=True ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def A__ ( self ) -> Dict:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
out1 = gen_random_output()
with temp_seed(42 ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data ( input_data )-> List[str]:
output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten ( data , expected_output )-> str:
output = NestedDataStructure(data ).flatten()
assert output == expected_output
def test_asdict ( )-> Union[str, Any]:
input = A(x=1 , y="""foobar""" )
expected_output = {"""x""": 1, """y""": """foobar"""}
assert asdict(input ) == expected_output
input = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(input ) == expected_output
with pytest.raises(TypeError ):
asdict([1, A(x=10 , y="""foo""" )] )
def _split_text ( text )-> List[Any]:
return text.split()
def _aseconds_generator_of_aitems_with_timing ( content )-> List[str]:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def test_iflatmap_unordered ( )-> int:
with Pool(2 ) as pool:
out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
out = []
for yield_time, content in iflatmap_unordered(
pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
out.append(content )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(out ) == 4
| 183 | 0 |
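The tests above exercise `map_nested`, which applies a function to every leaf of a nested structure. A minimal single-process sketch of that behavior (the real implementation also supports numpy arrays, multiprocessing, and progress bars):

def map_nested_sketch(fn, data):
    # Recurse into dicts and lists, applying fn at the leaves.
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [map_nested_sketch(fn, v) for v in data]
    return fn(data)

assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}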
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ (BaseImageProcessor ):
'''simple docstring'''
model_input_names = ["pixel_values"]
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = 0.9 , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , rescale_factor : Union[int, float] = 1 / 255 , do_rescale : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size , param_name="crop_size" )
self.do_resize = do_resize
self.size = size
self.crop_pct = crop_pct
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _A ( self : Optional[Any] , A : np.ndarray , A : Dict[str, int] , A : Optional[float] = None , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Any , ):
_UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
_UpperCAmelCase : Optional[int] = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_UpperCAmelCase : Any = int(size["height"] / crop_pct )
else:
_UpperCAmelCase : List[Any] = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(A ) )
_UpperCAmelCase : Optional[int] = get_resize_output_image_size(A , size=A , default_to_square=A )
else:
if "shortest_edge" in size:
_UpperCAmelCase : Dict = get_resize_output_image_size(A , size=size["shortest_edge"] , default_to_square=A )
elif "height" in size and "width" in size:
_UpperCAmelCase : List[str] = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(A ) )
return resize(A , size=A , resample=A , data_format=A , **A )
def _A ( self : Dict , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : List[Any] , ):
_UpperCAmelCase : Tuple = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(A , size=(size["height"], size["width"]) , data_format=A , **A )
def _A ( self : List[str] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Any , ):
return rescale(A , scale=A , data_format=A , **A )
def _A ( self : Optional[int] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[int] , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def _A ( self : int , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : int = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[Any] = crop_pct if crop_pct is not None else self.crop_pct
_UpperCAmelCase : Dict = resample if resample is not None else self.resample
_UpperCAmelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[str] = size if size is not None else self.size
_UpperCAmelCase : str = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[Any] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : List[str] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : Optional[int] = [self.resize(image=A , size=A , crop_pct=A , resample=A ) for image in images]
if do_center_crop:
_UpperCAmelCase : str = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
_UpperCAmelCase : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
_UpperCAmelCase : Union[str, Any] = [self.normalize(image=A , mean=A , std=A ) for image in images]
_UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : str = {"pixel_values": images}
return BatchFeature(data=A , tensor_type=A )
| 31 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/pegasus-xsum""": 512,
}
class lowerCamelCase_ (PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ["input_ids", "attention_mask"]
def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens , list ):
raise TypeError(
F"""additional_special_tokens should be of type {type(list )}, but is"""
F""" {type(additional_special_tokens )}""" )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
]
if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def _special_token_mask( self , seq ):
all_special_ids = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
if already_has_special_tokens:
return self._special_token_mask(token_ids_a )
elif token_ids_b is None:
return self._special_token_mask(token_ids_a ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
if token_ids_b is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_b + [self.eos_token_id]
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
| 31 | 1 |
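The `_special_token_mask` helper in the Pegasus tokenizer above reduces to a membership test over the special-token ids; a toy run (the ids are made up):

all_special_ids = {0, 1, 2, 3}                 # pad, eos and mask ids in this toy setup
seq = [0, 57, 124, 2, 1]
mask = [1 if x in all_special_ids else 0 for x in seq]
assert mask == [1, 0, 0, 1, 1]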
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = 'sew'
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1,
        activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
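The property above is just the product of the feature-extractor strides; with the default conv_stride this works out to one output frame per 320 raw audio samples (20 ms at 16 kHz):

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # default conv_stride above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320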
| 136 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # a single random 3x30x400 uint8 array, converted to a PIL image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
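The prepare_image_inputs helper above reduces to this numpy/PIL recipe; note the channel move from CHW to HWC, and that PIL reports size as (width, height):

import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)  # CHW, as in the test
img = Image.fromarray(np.moveaxis(arr, 0, -1))  # channels last for PIL
print(img.size)  # (400, 30)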
| 136 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False,
        special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
 | 341 |
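As a quick sanity check of the config class above, the equivalent VisualBertConfig shipped in transformers (assuming the package is installed) accepts the same keyword arguments:

from transformers import VisualBertConfig

config = VisualBertConfig(visual_embedding_dim=1024, bypass_transformer=True)
print(config.model_type, config.visual_embedding_dim)  # visual_bert 1024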
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
 | 341 | 1 |
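The nested results dict that check_results_dict_not_empty above walks has this shape; the timing value is made up for illustration:

results = {
    "sshleifer/tiny-gpt2": {
        "bs": [1],                   # batch sizes benchmarked
        "ss": [8],                   # sequence lengths benchmarked
        "result": {1: {8: 0.012}},   # result[batch_size][sequence_length]
    }
}
for model_result in results.values():
    for bs, ss in zip(model_result["bs"], model_result["ss"]):
        assert model_result["result"][bs][ss] is not None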
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_dir = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_dir, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
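A toy run of rewrite_dict_keys with made-up BPE entries: '@@' continuation markers are stripped and completed words get an explicit '</w>' suffix (the real function afterwards restores the four special tokens, which this stripped-down version omits):

import re

d = {"le@@": 5, "tt@@": 6, "er": 7}
d2 = dict(
    (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
    for k, v in d.items()
)
print(d2)  # {'le': 5, 'tt': 6, 'er</w>': 7}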
 | 356 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
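The slicing convention used by read_in_q_k_v above, demonstrated on a tiny made-up tensor (hidden size 4 is arbitrary): the fused timm qkv matrix splits into equal thirds in q, k, v order:

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv projection
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : hidden_size * 2, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)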
| 143 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
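A minimal sketch of how a notebook/doc build might substitute these placeholders; the loop below is illustrative, not doc-builder's actual code:

patterns = {"{processor_class}": "FakeProcessorClass"}
content = "processor = {processor_class}.from_pretrained(checkpoint)"
for placeholder, value in patterns.items():
    content = content.replace(placeholder, value)
print(content)  # processor = FakeProcessorClass.from_pretrained(checkpoint)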
| 340 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
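The idea behind the _LazyModule indirection above, reduced to a minimal stand-in (the real implementation also wires up submodules, dir(), and error handling):

import importlib

class LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:  # the heavy import only happens on first attribute access
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

math_mod = LazyAttr("math")
print(math_mod.sqrt(9.0))  # 3.0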
| 294 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
 | 351 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
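Example conversions using the table and function above; each value follows directly from the joule ratios:

print(energy_conversion("joule", "kilojoule", 5000))   # 5.0
print(energy_conversion("kilowatthour", "joule", 1))   # 3600000.0
print(energy_conversion("watthour", "kilojoule", 1))   # 3.6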
| 35 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
__UpperCamelCase : Optional[int] = F"{src_lang}-{tgt_lang}"
__UpperCamelCase : Dict = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    readme = __UpperCamelCase  # alias for the model-card text assembled in the f-string above
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 298 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
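The query-padding rule inside __call__ above, in isolation: every sample in a nested text batch is padded with " " up to the largest number of queries:

text = [["a photo of a cat"], ["a photo of a dog", "a photo of a person"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['a photo of a cat', ' '], ['a photo of a dog', 'a photo of a person']]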
| 298 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        # Prepare text and/or image inputs for the model in a single call.
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward all arguments to the tokenizer's `batch_decode`.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward all arguments to the tokenizer's `decode`.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
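
# --- Usage sketch (added; not part of the original file) ---
# A minimal, hedged example of driving the processor above; the checkpoint
# name and image path are assumptions for illustration only.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")  # assumed checkpoint
    image = Image.open("cat.png")  # hypothetical local image
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    # `inputs` merges input_ids/attention_mask from the tokenizer with
    # pixel_values from the image processor into one BatchEncoding.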
| 367 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    # Create an IAM role that SageMaker can assume, with the permissions
    # needed to run training jobs (ECR pulls, CloudWatch, logs, S3).
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    # Look up the ARN of an existing IAM role by name.
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
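

# --- Usage sketch (added; not part of the original file) ---
# A hedged example tying the two helpers above together: create (or reuse)
# the default execution role, then resolve its ARN. Valid AWS credentials
# would be required for the boto3 calls to succeed.
def _example_bootstrap_role():
    role_name = "accelerate_sagemaker_execution_role"  # default name used below
    _create_iam_role_for_sagemaker(role_name)  # prints and reuses if it already exists
    return _get_iam_role_arn(role_name)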


def get_sagemaker_input():
    """Interactively collect answers and build a `SageMakerConfig` from them."""
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field("How many machines do you want use? [1]: ", int, default=1)

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
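

# --- Usage sketch (added; not part of the original file) ---
# A hedged illustration of consuming the interactive flow above; the fields
# printed are the ones populated in the `SageMakerConfig` returned by
# `get_sagemaker_input`.
def _example_run_config_flow():
    config = get_sagemaker_input()  # prompts the user interactively
    print(config.ec2_instance_type, config.iam_role_name, config.mixed_precision)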
| 245 | 0 |