from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]

class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state

class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem): a single aggressive convolution."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state

class RegNetShortCut(nn.Module):
    """Projects residual features to the correct size and optionally downsamples the input."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state

class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer, see https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # (batch, channels, height, width) -> (batch, channels, 1, 1)
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state

class RegNetXLayer(nn.Module):
    """RegNet's layer composed of three convolutions, the same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state

class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)

class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value

REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )

@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
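
# A short usage sketch for the classification head above (mirrors the standard
# transformers image-classification pattern; `image` is assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])
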
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
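
# Minimal sketch: constructing the deprecated class behaves identically to
# constructing VideoMAEImageProcessor, apart from the FutureWarning above:
#
#     extractor = VideoMAEFeatureExtractor()  # works, but warns
#     processor = VideoMAEImageProcessor()    # preferred
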
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
_a : int= get_logger(__name__)
class UpperCamelCase :
def __init__(self : Optional[Any] , _A : Optional[Any] , _A : List[Any]=None) -> Dict:
__snake_case : List[Any] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , _A , getattr(_A , _A))
__snake_case : Optional[int] = module._original_module if isinstance(_A , _PatchedModuleObj) else module
class UpperCamelCase :
UpperCAmelCase : Optional[Any] = []
def __init__(self : str , _A : Union[str, Any] , _A : str , _A : Tuple , _A : List[Any]=None) -> Union[str, Any]:
__snake_case : List[Any] = obj
__snake_case : Tuple = target
__snake_case : int = new
__snake_case : Union[str, Any] = target.split('.')[0]
__snake_case : Optional[int] = {}
__snake_case : Dict = attrs or []
def __enter__(self : Optional[Any]) -> Optional[int]:
*__snake_case , __snake_case : Dict = self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(_A)):
try:
__snake_case : List[str] = import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__snake_case : Union[str, Any] = getattr(self.obj , _A)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(_A , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
__snake_case : Optional[int] = obj_attr
# patch at top level
setattr(self.obj , _A , _PatchedModuleObj(_A , attrs=self.attrs))
__snake_case : Any = getattr(self.obj , _A)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(_A , _A , _PatchedModuleObj(getattr(_A , _A , _A) , attrs=self.attrs))
__snake_case : List[str] = getattr(_A , _A)
# finally set the target attribute
setattr(_A , _A , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__snake_case : str = getattr(import_module('.'.join(_A)) , _A)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , _A) is attr_value:
__snake_case : str = getattr(self.obj , _A)
setattr(self.obj , _A , self.new)
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__snake_case : Optional[int] = globals()['__builtins__'][target_attr]
setattr(self.obj , _A , self.new)
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
def __exit__(self : List[Any] , *_A : Tuple) -> Dict:
for attr in list(self.original):
setattr(self.obj , _A , self.original.pop(_A))
def _lowercase (self : int) -> Tuple:
self.__enter__()
self._active_patches.append(self)
def _lowercase (self : List[str]) -> List[str]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
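
# A minimal usage sketch for `patch_submodule` (hypothetical module `mymodule`
# that does `import os` at its top level; `fake_join` is a stand-in):
#
#     import mymodule
#
#     def fake_join(*parts):
#         return "/".join(parts)
#
#     with patch_submodule(mymodule, "os.path.join", fake_join):
#         assert mymodule.os.path.join("a", "b") == "a/b"  # patched
#     # on exit, the original `os` module is restored on `mymodule`
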
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references."""
        return len(self.forward)

class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Random level from the [1, self.max_level] interval; higher values are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding `key` (or None) and the list of nodes that may need updating."""
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]

            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None

def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10

def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None

def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)

    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat the tests 100 times due to the probabilistic nature of the skip list:
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
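
# A sanity sketch for `random_level` (illustrative, not from the original file):
# with p = 0.5 a node reaches level L with probability 0.5 ** (L - 1), so most
# nodes stay at level 1 or 2 and lookups are expected O(log n).
#
#     from collections import Counter
#     Counter(SkipList().random_level() for _ in range(1000))  # mostly 1s and 2s
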
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients are given in order of degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
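
# A quick usage sketch (illustrative values, not from the original file):
#
#     p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
#     q = Polynomial(1, [0, 1])     # x
#     print(p + q)                  # 3x^2 + 3x + 1
#     print(p.evaluate(2))          # 17
#     print(p.derivative())         # 6x + 2
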
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz: appends Fizz for multiples of 3, Buzz for multiples of 5,
    FizzBuzz for multiples of both, and otherwise the number itself.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()

import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
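
# A short usage sketch (values follow the defaults above):
#
#     config = UniSpeechConfig()
#     config.hidden_size             # 768
#     config.inputs_to_logits_ratio  # 320 == 5 * 2 * 2 * 2 * 2 * 2 * 2
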
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
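
# Hedged usage sketch: PipelineTool instances are callable, so something like the
# following should transcribe a raw waveform (`audio` is a placeholder array):
#
#     tool = SpeechToTextTool()
#     transcript = tool(audio)
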
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #  ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # fmt: on
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )

import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification

def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict

def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
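
# Example invocation (the output folder is illustrative; the script name is an assumption):
#
#     python convert_swinv2_timm_to_pytorch.py \
#         --swinv2_name swinv2_tiny_patch4_window8_256 \
#         --pytorch_dump_folder_path ./swinv2_tiny_patch4_window8_256
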
import unittest

from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch


class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
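
    # Each test below feeds `metas` through a pretrained tokenizer and compares the
    # result against frozen reference ids: tokens[0] carries the full lyrics, while
    # tokens[1] and tokens[2] (the lower priors) only receive the metadata tokens.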
@require_torch
    def test_1b_lyrics_tokenizer(self):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        # note: the 5b vocabulary shifts the character ids by one relative to 1b
        # (space is 77 instead of 76, newline 79 instead of 78) and pads the
        # metadata block with -1
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
| 103
| 1
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
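
# Evaluation driver: generate summaries (or translations) for a file of source lines
# with any seq2seq checkpoint, then optionally score the output with ROUGE or BLEU.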
logger = getLogger(__name__)

DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
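    # Usage for summarization:
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --bs 16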
run_generate(verbose=True)
| 145
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # classic backtracking: extend the current combination one element at a time,
    # recurse, then pop so the list can be reused for the next branch
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
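    # With n=4 and k=2 this prints the six 2-combinations of {1, 2, 3, 4}:
    # 1 2, 1 3, 1 4, 2 3, 2 4 and 3 4.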
| 301
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
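

# The tester below builds a small BERT-backed DPR config plus random inputs that the
# actual tests reuse for the context encoder, the question encoder and the reader.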
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 189
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
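
    # `attribute_map` lets generic code read `config.hidden_size` etc. while the
    # config itself stores the encoder-centric names used by the implementation.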
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_time_features=0, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
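
    # A worked example with the defaults above: cardinality falls back to [0], so
    # sum(embedding_dimension) == 0 and _number_of_features == input_size * 2 == 2;
    # feature_size in __init__ therefore ends up as 1 * len(lags_sequence) + 2 = 9.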
| 189
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
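
# These tests drive the extractor with lists of random floats standing in for audio,
# so padding, batching and cepstral normalization can be checked without real data.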
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of shape (batch, length) as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # normalized filter-bank features should be ~zero-mean / unit-variance per bin
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 95
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
# If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
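
# Typical usage of TextStreamer (a sketch): stream `generate` output to stdout.
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["The sky is"], return_tensors="pt")
#   model.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)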
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 95
| 1
|
"""simple docstring"""
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A_ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 362
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # DFS over the state space; a branch is cut as soon as the running sum exceeds
    # `max_sum` or the remaining numbers can no longer reach it
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
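# Prints the subsets of [3, 34, 4, 12, 5, 2] that sum to 9: [3, 4, 2] and [4, 5].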
| 316
| 0
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase : Dict = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_lowerCamelCase : Union[str, Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_lowerCamelCase : Optional[Any] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 14
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
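

# The tester below mirrors the standard HF testing pattern: a tiny, randomly initialised
# LlamaConfig plus random inputs shared by the model, causal-LM and classification tests.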
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the "
            "universe and 2) the passage of time and the length of objects can vary depending on the observer's frame "
            "of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known "
            "as the \"princi"
        )
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
"""simple docstring"""
import os
import sys
_snake_case = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Instantiate a configuration with AutoConfig.from_pretrained."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Instantiate a tokenizer with AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Instantiate a base model with AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Instantiate a causal-LM head model with AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Instantiate a masked-LM head model with AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Instantiate a sequence-classification model with AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Instantiate a question-answering model with AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
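# Usage sketch (assumes this module is exposed as a torch.hub entry point, e.g. `hubconf.py`;
# the repo path and checkpoint name below are illustrative):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")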
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                " The training dataset will be truncated in blocks of this size for training."
                " Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
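# Example invocation (a sketch; the model name, data path, and output dir are placeholders):
#   python run_language_modeling.py \
#     --model_name_or_path gpt2 \
#     --train_data_file /path/to/train.txt \
#     --do_train \
#     --output_dir /tmp/lm_output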
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_data_folder_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    # this function has to be in the manager under this name so that testing works
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    # this function has to be in the manager under this name so that testing works
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    # this function has to be in the manager under this name so that testing works
    def extract(self, path, *args, **kwargs):
        return path

    # this function has to be in the manager under this name so that testing works
    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
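# Usage sketch (illustrative only; the dataset name, version, and URL are placeholders):
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0", use_local_dummy_data=True)
#   local_path = dl_manager.download_and_extract("https://example.com/data.zip")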
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
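# Usage sketch (illustrative; assumes the `t5-small` checkpoint can be downloaded):
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok.get_sentinel_tokens()[:2]     # e.g. ['<extra_id_0>', '<extra_id_1>']
#   tok.get_sentinel_token_ids()[:2]  # the corresponding vocabulary ids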
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
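# Minimal usage sketch (illustrative only):
#   hm: HashMap[str, int] = HashMap()
#   hm["one"] = 1
#   hm["two"] = 2
#   del hm["one"]
#   assert "one" not in hm and hm["two"] == 2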
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_w = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_w):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
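# Note: a vectorized equivalent (sketch) avoids the Python double loop entirely:
#   img = 255 - img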
if __name__ == "__main__":
# read original image
lowerCamelCase : str =imread('''image_data/lena.jpg''', 1)
# convert to its negative
lowerCamelCase : Union[str, Any] =convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
UpperCamelCase__ : int = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
'''simple docstring'''
import torch
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : Optional[int] = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ : Optional[Any] = [[5, 7], [10, 11]]
UpperCamelCase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
UpperCamelCase__ : int = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
UpperCamelCase__ : List[Any] = retriever(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retriever_call(self):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = [[5, 7], [10, 11]]
UpperCamelCase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
UpperCamelCase__ : Optional[int] = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
        self.assertEqual(
            len(SCREAMING_SNAKE_CASE ) , 6 )  # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , SCREAMING_SNAKE_CASE )  # check for the doc-token-related keys in the dictionary
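# --- Illustrative sketch (not part of the test suite) ---
# Why the assertions above expect doc id "1" for the first query: with document
# embeddings of -ones and +ones (mirroring the dummy fixtures, an assumption),
# maximum inner product picks the +ones doc for an all-ones query and vice versa.
import numpy as np

_vector_size = 4  # hypothetical size; any positive int shows the effect
_doc_embeds = np.stack([-np.ones(_vector_size), np.ones(_vector_size)])  # docs 0, 1
_queries = np.stack([np.ones(_vector_size), -np.ones(_vector_size)])
_scores = _queries @ _doc_embeds.T  # shape (2 queries, 2 docs)
assert _scores.argmax(axis=1).tolist() == [1, 0]  # matches doc_ids [[1], [0]]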
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66,
        attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64,
        num_random_blocks=3, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
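# Hedged usage sketch: instantiating the configuration above through the public
# `transformers` API (assumes the installed package; values are examples only).
from transformers import BigBirdConfig as _BigBirdConfig

_cfg = _BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
assert _cfg.model_type == "big_bird"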
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
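# Hedged usage sketch mirroring the slow test above; left commented out because
# it downloads the full DPT checkpoint. Exact depth values are environment-dependent.
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["predicted_depth"]  # torch.Tensor; out["depth"] is a PIL.Image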
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
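# Illustrative wiring sketch (commented out; `output_dir` and the LightningModule
# are assumptions, and the helper names follow the definitions above):
# checkpoint = get_checkpoint_callback("outputs", metric="rouge2")
# early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
# trainer = pl.Trainer(callbacks=[checkpoint, early_stop, Seq2SeqLoggingCallback()])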
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
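# Hedged usage sketch of the guarded exports above (commented out; requires torch
# and transformers so the real class resolves, and the model id is an example):
# from diffusers import TextToVideoSDPipeline
# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
# frames = pipe("a panda playing guitar").frames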
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1,
        bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768,
        num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072,
        hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ):
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair,
            framework=framework,
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
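# Minimal sketch (assumes an installed `transformers`): `attention_window` may be
# one int shared by every layer or a per-layer list, as the union type above allows.
from transformers import LongformerConfig as _LongformerConfig

_cfg = _LongformerConfig(attention_window=[256] * 12)
assert _cfg.attention_window == [256] * 12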
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self, batch_size: int = 1, num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None, return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger than or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)
        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample
        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
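# Hedged usage sketch for the pipeline above (commented out; the checkpoint id is
# an assumption -- any unconditional audio-diffusion checkpoint with this layout works):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
# audio = output.audios[0]  # numpy array of shape (channels, samples)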
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
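# Hedged torch.hub-style usage sketch (commented out; entry-point names follow
# the wrappers above, which mirror the usual hubconf convention):
# model = modelForSequenceClassification("bert-base-uncased")
# tok = tokenizer("bert-base-uncased")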
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
        is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False,
        num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False,
        two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
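# Minimal sketch (assumes `transformers` exposes DeformableDetrConfig): note the
# constraint enforced above that `two_stage=True` requires `with_box_refine=True`,
# and that the attribute_map aliases `hidden_size` to `d_model`.
from transformers import DeformableDetrConfig as _DeformableDetrConfig

_cfg = _DeformableDetrConfig(two_stage=True, with_box_refine=True)
assert _cfg.hidden_size == _cfg.d_model == 256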
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
a__ = get_tests_dir('''fixtures/dummy-config.json''')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : str = 0
def __lowercase ( self ) -> Any:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase ( self ) -> int:
_a : Any = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase ( self ) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_a : int = os.path.join(UpperCAmelCase_ , '''fake-roberta''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
_a : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
def __lowercase ( self ) -> List[Any]:
try:
AutoConfig.register('''custom''' , UpperCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('''model''' , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('''bert''' , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_a : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
_a : Dict = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowercase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
UpperCAmelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
_a : Union[str, Any] = AutoConfig.from_pretrained('''bert-base''' )
def __lowercase ( self ) -> Tuple:
with self.assertRaisesRegex(
UpperCAmelCase_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision='''aaaaaa''' )
def __lowercase ( self ) -> Tuple:
with self.assertRaisesRegex(
UpperCAmelCase_ , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
_a : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __lowercase ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
_a : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
_a : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ )
_a : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
_a : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( snake_case__ ):
"""simple docstring"""
UpperCAmelCase__ : str = "new-model"
try:
AutoConfig.register('''new-model''' , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
_a : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
_a : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
_a : Tuple = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
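# Standalone sketch of the register/round-trip pattern exercised above, using a
# hypothetical config class (names are illustrative, not from the test fixtures):
from transformers import AutoConfig as _AutoConfig, PretrainedConfig as _PretrainedConfig


class _MyConfig(_PretrainedConfig):
    model_type = "my-model"


_AutoConfig.register("my-model", _MyConfig)
assert isinstance(_AutoConfig.for_model("my-model"), _MyConfig)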
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
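# Hedged consumption sketch (commented out): with the lazy module above, the
# import resolves `modeling_xmod` -- and its torch dependency -- only on access.
# from transformers.models.xmod import XmodConfig
# config = XmodConfig()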
"""simple docstring"""
from math import sqrt
def is_prime(number):
    """Returns True if 'number' is prime, else False."""
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    status = True
    # 0 and 1 are not prime.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"
    return status
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__SCREAMING_SNAKE_CASE = list(range(2 , n + 1 ) )
__SCREAMING_SNAKE_CASE = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__SCREAMING_SNAKE_CASE = 0
# filters actual prime numbers.
__SCREAMING_SNAKE_CASE = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
__SCREAMING_SNAKE_CASE = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
__SCREAMING_SNAKE_CASE = [] # this list will be returns of the function.
# potential prime number factors.
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__SCREAMING_SNAKE_CASE = 0
# prime factorization of 'number'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase_ ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
__SCREAMING_SNAKE_CASE = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__SCREAMING_SNAKE_CASE = get_prime_numbers(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ )
# run variable for while-loops.
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
# exit variable. for break up the loops
__SCREAMING_SNAKE_CASE = True
while i < len_pn and loop:
__SCREAMING_SNAKE_CASE = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__SCREAMING_SNAKE_CASE = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE = 0
while numbera != 0:
__SCREAMING_SNAKE_CASE = numbera % numbera
__SCREAMING_SNAKE_CASE = numbera
__SCREAMING_SNAKE_CASE = rest
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__SCREAMING_SNAKE_CASE = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__SCREAMING_SNAKE_CASE = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__SCREAMING_SNAKE_CASE = p_number_a + 1 # jump to the next number
__SCREAMING_SNAKE_CASE = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
__SCREAMING_SNAKE_CASE = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
__SCREAMING_SNAKE_CASE = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__SCREAMING_SNAKE_CASE = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
__SCREAMING_SNAKE_CASE = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1 # this will be return
for _ in range(n - 1 ):
__SCREAMING_SNAKE_CASE = ans
ans += fiba
__SCREAMING_SNAKE_CASE = tmp
return ans
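# Worked examples: is_prime is defined above; prime_factorization is the name the
# internal call sites use for the (still anonymized) factorization helper.
assert is_prime(97)
# assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]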
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
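    # Worked check of the recurrence: 3, 8, 19, 43 prize strings for 1..4 days,
    # small enough to verify by hand.
    assert solution(4) == 43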
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
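# Example invocation sketch (the script filename and paths are placeholders; the
# checkpoint basename must appear in ACCEPTABLE_CHECKPOINTS, per the assert above):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa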
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/rembert""": 2_5_6,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
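# Hedged usage sketch (commented out; assumes the `google/rembert` checkpoint and
# the `tokenizers` backend are available):
# tok = RemBertTokenizerFast.from_pretrained("google/rembert")
# enc = tok("hello", "world")  # [CLS] hello [SEP] world [SEP], token type ids 0/1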
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
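
# A minimal standalone sketch of the pattern exercised above: accelerate's
# FullyShardedDataParallelPlugin reads its configuration from environment variables, so a
# plugin can be constructed without any CLI flags (values illustrative):
#
#     import os
#     os.environ["ACCELERATE_USE_FSDP"] = "true"
#     os.environ["FSDP_SHARDING_STRATEGY"] = "1"   # FULL_SHARD
#     os.environ["FSDP_OFFLOAD_PARAMS"] = "false"
#     plugin = FullyShardedDataParallelPlugin()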
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
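
# For reference, one command assembled by test_performance above looks like the following
# (paths illustrative; the flags are the accelerate launcher's FSDP options used in this file):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#       --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=/tmp/out --performance_lower_bound=0.82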
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
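
# These SageMaker jobs only run during release testing; per the skipif marker above they
# are enabled with the TEST_SAGEMAKER environment variable, e.g. (test path illustrative):
#
#   TEST_SAGEMAKER=True pytest -s tests/sagemaker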
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
        'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
        ),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
        'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
        ),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
        'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
        ),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (optionally with pairs), always padding to max_length."""
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)

        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')

            if encoded_input_ids is not None:
                output_data['input_ids'].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data['attention_mask'].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data['token_type_ids'].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS] and [SEP] around a sequence or sequence pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
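
# A usage sketch of batch_encode_candidates above, assuming the public REALM encoder
# checkpoint from the maps at the top of this file; every inner list must hold the same
# number of candidates, and outputs come back padded to max_length:
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = [["candidate 1 for q1", "candidate 2 for q1"],
#              ["candidate 1 for q2", "candidate 2 for q2"]]
#     out = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="pt")
#     # out["input_ids"].shape -> (2, 2, 10)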
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/openai/human-eval',
            codebase_urls=['https://github.com/openai/human-eval'],
            reference_urls=['https://github.com/openai/human-eval'],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
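
# Worked example for the unbiased estimator above: with n=5 samples and c=2 correct,
# pass@1 = 1 - (1 - 1/4)(1 - 1/5) = 0.4, i.e. the fraction of correct samples:
#
#     print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # -> [0.4]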
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal if (1 + sqrt(1 + 24 n)) / 6 is a whole number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns D = P_j - P_i for the first pentagonal pair whose sum and difference are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"{solution() = }")
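
# Quick sanity check for is_pentagonal (P_4 = 22 and P_5 = 35 are pentagonal, 57 is not):
#
#     assert is_pentagonal(22) and is_pentagonal(35) and not is_pentagonal(57)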
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
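
# Quick check of add_three: 1/2 + 1/3 + 1/6 = 1, so the reduced result is (1, 1):
#
#     assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)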
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    Implements the ELU activation: alpha * (e^x - 1) for x <= 0, x otherwise.

    >>> exponential_linear_unit(np.array([1.0, 0.0, -1.0]), alpha=1.0)
    array([ 1.        ,  0.        , -0.63212056])
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
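
# Migration sketch (checkpoint name illustrative): both calls load the same preprocessing
# config, but only the second avoids the FutureWarning emitted above:
#
#     DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")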
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sorts array[start:end] in place and returns the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sifts array[index] down so the subtree rooted at index satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Returns the median of the three sampled elements, used as the quicksort pivot."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition around pivot; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    Introsort entry point.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
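
# The hybrid above is introsort: quicksort with a median-of-3 pivot until the depth budget
# 2 * ceil(log2(n)) is exhausted (then heapsort), with insertion sort for partitions of at
# most size_threshold = 16 elements. For example:
#
#     print(sort([5, 3, 1, 4, 2]))  # -> [1.0-style floats or ints as given: [1, 2, 3, 4, 5]]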
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
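
# Example invocation (script/paths illustrative): convert only the BERT checkpoints and
# keep the generated tokenizer.json files under ./fast-tokenizers/:
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --dump_path ./fast-tokenizers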
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
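
# With the lazy module in place, `import transformers.models.deberta` stays cheap and the
# heavy torch/TF imports only happen on attribute access. A minimal sketch of the same
# pattern for any package __init__ (names illustrative):
#
#     import sys
#     from ...utils import _LazyModule
#     _import_structure = {"submodule": ["PublicName"]}
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)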
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
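
# Distilled from the tests above, the minimal deterministic pipeline call exercised here
# (tiny test checkpoint, greedy decoding):
#
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     generator("Hello I believe in", do_sample=False, max_new_tokens=5)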
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("""Building PyTorch model from configuration: {}""".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
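
# Example invocation (paths illustrative):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin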
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
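# Example invocation (a sketch; paths are placeholders). Pass --classification_head
# to convert the MNLI classification head instead of the LM head:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head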
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: return the sum of all amicable numbers below limit."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
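# Sanity check (an added sketch, not part of the original solution): 220 and 284
# are the classic amicable pair, so sum_of_divisors should map each to the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220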
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
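# Example (a sketch): "KHOOR" is "HELLO" shifted forward by 3, so calling
# decrypt("KHOOR") prints the plaintext on the line for key 3:
#
#   Decryption using Key #3: HELLO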
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
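# Usage sketch (assumes the transformers package is installed; this snippet is
# not part of the original module):
#
#   from transformers import WhisperConfig
#   config = WhisperConfig()
#   config.hidden_size  # resolves to config.d_model (256) via attribute_map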
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False, remove_space=True, keep_accents=True,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
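# Usage sketch (not part of the original module; the hub checkpoint is the
# usual source of the SentencePiece file):
#
#   from transformers import RemBertTokenizer
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   tokenizer("Hello world!")["input_ids"]  # includes the [CLS] ... [SEP] ids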
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
        attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
            attention_type=self.attention_type, block_size=self.block_size,
            num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # attention outputs differ between the flax and pytorch block-sparse kernels, so skip them
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
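# To run only these tests (a sketch; the file path mirrors the usual
# transformers test layout and may differ in your checkout):
#
#   pytest tests/models/big_bird/test_modeling_flax_big_bird.py -k "jit" -v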
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences with nltk and rejoin them with newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
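# Usage sketch (the helper below is hypothetical, not part of the original
# config): the mapping is meant to be applied as plain string substitution to a
# documentation code cell before it is formatted.
def apply_black_avoid_patterns(source: str) -> str:
    # Replace each doc-template placeholder with a fake class name.
    for pattern, replacement in black_avoid_patterns.items():
        source = source.replace(pattern, replacement)
    return source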
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path,
    encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
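# Example invocation (a sketch; the script name and all paths are placeholders):
#
#   python convert_wav2vec2_seq2seq_original_s2t_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path /path/to/output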
from math import ceil
def solution(n: int = 1001) -> int:
    """Project Euler 28: sum of the numbers on the diagonals of an n by n spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
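# Sanity check (an added sketch): the diagonals of a 5x5 spiral are
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.
assert solution(5) == 101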
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
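# Usage sketch (assumes transformers with vision extras and a PIL image; the
# variable names `image`, `words`, and `boxes` are illustrative):
#
#   from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3TokenizerFast
#   processor = LayoutLMv3Processor(
#       LayoutLMv3ImageProcessor(apply_ocr=False),
#       LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),
#   )
#   encoding = processor(image, words, boxes=boxes, return_tensors="pt")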
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
a : Tuple = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
a : List[str] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
a : List[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train and test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
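# Optional extension (a sketch, not in the original script): inside main(), the
# plain test accuracy could be reported alongside the confusion matrix.
#
#   predictions = xgboost_classifier.predict(x_test)
#   print("accuracy:", (predictions == y_test).mean())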
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '''Hello world! cécé herlolip'''
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Any:
A: List[str] = FairseqRobertaModel.from_pretrained(__lowercase )
roberta.eval() # disable dropout
A: Tuple = roberta.model.encoder.sentence_encoder
A: Tuple = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
A: Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , __lowercase )
A: Any = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A: Union[str, Any] = roberta_sent_encoder.embed_tokens.weight
A: int = roberta_sent_encoder.embed_positions.weight
A: Dict = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A: Dict = roberta_sent_encoder.layer_norm.weight
A: Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A: BertLayer = model.roberta.encoder.layer[i]
A: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
A: RobertaAttention = layer.attention
A: Optional[Any] = roberta_layer.self_attn_layer_norm.weight
A: Dict = roberta_layer.self_attn_layer_norm.bias
# self attention
A: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A: Dict = roberta_layer.self_attn.q_proj.weight
A: List[Any] = roberta_layer.self_attn.q_proj.bias
A: int = roberta_layer.self_attn.k_proj.weight
A: int = roberta_layer.self_attn.k_proj.bias
A: Tuple = roberta_layer.self_attn.v_proj.weight
A: Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
A: BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A: str = roberta_layer.self_attn.out_proj.weight
A: List[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A: List[Any] = roberta_layer.final_layer_norm.weight
A: Dict = roberta_layer.final_layer_norm.bias
# intermediate
A: BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
A: List[Any] = roberta_layer.fca.weight
A: Dict = roberta_layer.fca.bias
# output
A: BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
A: Any = roberta_layer.fca.weight
A: List[Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
A: Optional[Any] = roberta.model.classification_heads['''mnli'''].dense.weight
A: Any = roberta.model.classification_heads['''mnli'''].dense.bias
A: str = roberta.model.classification_heads['''mnli'''].out_proj.weight
A: int = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
A: str = roberta.model.encoder.lm_head.dense.weight
A: int = roberta.model.encoder.lm_head.dense.bias
A: List[Any] = roberta.model.encoder.lm_head.layer_norm.weight
A: str = roberta.model.encoder.lm_head.layer_norm.bias
A: List[str] = roberta.model.encoder.lm_head.weight
A: str = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
A: torch.Tensor = roberta.encode(__lowercase ).unsqueeze(0 ) # batch of size 1
A: Optional[int] = model(__lowercase )[0]
if classification_head:
A: int = roberta.model.classification_heads['''mnli'''](roberta.extract_features(__lowercase ) )
else:
A: Dict = roberta.model(__lowercase )[0]
print(our_output.shape , their_output.shape )
A: Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
A: str = torch.allclose(__lowercase , __lowercase , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
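# Example invocation (paths below are illustrative placeholders, not from the source):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head   # only when converting an MNLI classification checkpoint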
| 319
|
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs) -> Wav2Vec2PhonemeCTCTokenizer:
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)

        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass
    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass
    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass
    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 319
| 1
|
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a zero-padded binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
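# A quick sanity check of the behavior above (values worked out by hand, not from the source):
#   binary_and(25, 32)  ->  "0b000000"   (11001 & 100000, zero-padded to six bits)
#   binary_and(37, 50)  ->  "0b100000"   (100101 & 110010)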
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
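# Note on the padding rule above (worked example, not from the source): with pad_size=8,
# a 253x301 image gets bottom/right symmetric padding of 3 pixels each, yielding 256x304.
# Because the formula always adds at least one full block, an already-aligned 256-pixel
# side would be padded again to 264.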
| 12
| 1
|
from string import ascii_uppercase
letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` by subtracting the key letter's index modulo 26 (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            encrypted += index_to_letter[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt by adding the key letter's index back modulo 26."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
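# Worked single-letter example of the scheme above (hand-checked, not from the source):
# encrypting "T" (index 19) with key letter "S" (index 18) gives (19 - 18) % 26 = 1 -> "B";
# decrypting "B" with "S" gives (1 + 18 + 26) % 26 = 19 -> "T", so the round trip is lossless.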
| 50
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
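# How the masked mean pooling in `forward` behaves (shapes given for illustration):
# `embs` has shape (batch, seq_len, hidden); multiplying by the unsqueezed attention mask
# zeroes padding positions, the sum over seq_len is divided by the count of real tokens,
# and the resulting (batch, hidden) sentence embedding is projected into the CLIP image space.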
| 50
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
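# Reproducibility note for the tests above: both integration tests seed the generator with
# torch.manual_seed(0) before sampling, so the expected numpy slices only hold for that seed
# (and for the pinned scheduler defaults); any other seed produces different pixel values.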
| 214
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and rejoin them with newlines (rougeLsum expects newline-separated sentences)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
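# Minimal usage sketch (the input string is illustrative):
#   add_newline_to_end_of_each_sentence("First sentence. Second one.<n>")
#   -> "First sentence.\nSecond one."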
| 214
| 1
|
'''simple docstring'''
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Two-pointer check from both ends of the string."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Compare each character with its mirror using index arithmetic."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Peel the outermost pair of characters and recurse on the rest."""
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Compare the string against its reverse slice."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    """Time one implementation over the whole test_data dict."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
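# The timeit pattern above: `stmt` runs one full sweep of test_data per iteration, `setup`
# imports the function under test from __main__, and number=500_000 repeats the statement
# that many times, so the printed seconds are totals for all runs, not per-call times.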
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 28
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
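# Lazy-import sketch: at runtime the module object is swapped for a _LazyModule, so
# `from transformers.models.m2m_100 import M2M100Model` only triggers the heavy torch
# import when the attribute is first accessed; under TYPE_CHECKING the real imports run
# instead, so static analyzers still see concrete symbols.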
| 28
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet2DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
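# fp16 note for the second integration test above: loading with torch_dtype=torch.float16
# halves VRAM and changes rounding, which is why it carries its own expected slice rather
# than reusing the fp32 values (the tolerance stays at 1e-2 in both tests).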
| 236
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
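# Example invocation (paths are illustrative placeholders, not from the source):
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dumps \
#       --model_type bert --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#       --config_file ./bert/config.json --compare_with_pt_model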
| 236
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head so only the backbone weights remain."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in the given state dict."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Load the COCO image of two cats used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
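# Minimal illustrative sketch (not part of the conversion script): the fused timm qkv projection
# has shape (3 * hidden_size, hidden_size); the row-wise slices taken in read_in_q_k_v above
# recover the query, key and value matrices, in that order. `hidden_size=4` is an arbitrary choice.
def _split_qkv_sketch(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size]
    k = qkv[hidden_size : hidden_size * 2]
    v = qkv[-hidden_size:]
    # stacking the slices back together reproduces the fused matrix
    assert torch.equal(torch.cat([q, k, v]), qkv)
    return q, k, v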
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm hybrid ViT weights into our ViT hybrid structure."""
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
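# Example invocation (a sketch only; the script file name and output path are assumptions,
# not taken from the original source):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path /tmp/vit-hybrid-base-bit-384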
| 79
|
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence: a(1) = 2, a(n + 1) = a(n)^2 - a(n) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
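# Sketch (illustration only, not part of the original): the same sequence computed iteratively
# from the recurrence above, which avoids recursion depth limits for large n.
def sylvester_iterative(number: int) -> int:
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1  # a(n + 1) = a(n)^2 - a(n) + 1
    return term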
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 79
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
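    # Sketch (illustration only, not one of the original tests): pieces absent from this
    # 1000-piece fixture vocab map to id 0, which decodes back to '<unk>' — the reason the
    # '9' and 'é' pieces flip to '<unk>' in the round trip above.
    def _unknown_token_roundtrip_sketch(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        ids = tokenizer.convert_tokens_to_ids(['9', '2'])
        return tokenizer.convert_ids_to_tokens(ids)  # ['<unk>', '2'] for this fixture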
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['input_ids'].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            'This is a very simple sentence.',
            'The quick brown fox jumps over the lazy dog.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/reformer-crime-and-punishment', revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a', padding=False, sequences=sequences, )
| 351
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
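    # Sketch (illustration, not an original helper): clamp(pad_token_id + 1) lifts every id to at
    # least pad_token_id + 1, so no pad ids can appear inside the prepared sequences above.
    @staticmethod
    def _clamp_sketch():
        ids = torch.tensor([[0, 5, 0, 3]])  # 0 plays the role of the pad id here
        return ids.clamp(0 + 1)  # -> tensor([[1, 5, 1, 3]])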
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self, config, input_dict, ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
    @unittest.skip('Test has a segmentation fault on torch 1.8.0')
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'], )
    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_with_head_masking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 50
| 0
|
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
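# Example usage (illustration, not part of the original): the result agrees with the built-in
# & operator, just rendered as a zero-padded binary string.
def _binary_and_examples():
    assert binary_and(5, 3) == "0b001"        # 101 & 011 -> 001
    assert int(binary_and(25, 37), 2) == 25 & 37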
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12
| 1
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 1_28
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 10_24
    pairwise_state_dim: int = 1_28
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_28
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 3_84
    pairwise_dim: int = 1_28
    ipa_dim: int = 16
    resnet_dim: int = 1_28
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self)
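# Minimal sketch (illustration only, not part of the configuration module): the head-width
# checks in TrunkConfig.__post_init__ amount to simple divisibility arithmetic. With the
# defaults, sequence_state_dim 1024 / sequence_head_width 32 gives 32 sequence heads.
def _trunk_head_arithmetic_sketch():
    cfg = TrunkConfig()
    assert cfg.sequence_state_dim // cfg.sequence_head_width == 32
    assert cfg.pairwise_state_dim // cfg.pairwise_head_width == 4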
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 356
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector of arbitrary dimension backed by a list of components."""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension):
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A matrix with ``w`` columns and ``h`` rows, stored as a list of row lists."""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n):
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(w, h, a, b):
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(w)] for _ in range(h)
    ]
    return Matrix(matrix, w, h)
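# Usage sketch (illustration only, not part of the original module): vector addition and a
# 2x2 determinant computed with the classes above.
def _linear_algebra_sketch():
    v = Vector([1, 2]) + Vector([3, 4])   # -> (4,6)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    return str(v), m.determinant()        # -> ("(4,6)", -2)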
| 253
| 0
|
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 214
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=3_2128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
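# Sketch (illustration only, not part of the configuration module): with the defaults above,
# 12 layers and 3 sparse layers per stack give a sparse step of 12 // 3 = 4, i.e. every 4th
# block on each side is a sparse MoE block.
def _sparse_step_sketch():
    config = SwitchTransformersConfig()
    assert config.encoder_sparse_step == 4 and config.decoder_sparse_step == 4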
| 214
| 1
|
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian phone number, optionally prefixed with +91, 91 or 0."""
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
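# Examples (illustration, not part of the original): both the bare 10-digit form and the
# +91-prefixed form validate; a number that does not start with 7, 8 or 9 is rejected.
def _phone_examples():
    assert indian_phone_validator("9876543210")
    assert indian_phone_validator("+91 9876543210")
    assert not indian_phone_validator("123456789")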
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 351
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
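    # Sketch (illustration only, not part of the scheduler API): with 1000 training steps and 50
    # inference steps, step_ratio is 20 and the schedule is [980, 960, ..., 20, 0], descending.
    @staticmethod
    def _timestep_schedule_sketch(num_train_timesteps=1_000, num_inference_steps=50):
        step_ratio = num_train_timesteps // num_inference_steps
        return (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]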
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
def lowerCamelCase_ ( self: Any , UpperCamelCase_: DDPMSchedulerState , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: int , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: Optional[jax.random.KeyArray] = None , UpperCamelCase_: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(UpperCamelCase_ , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(UpperCamelCase_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(UpperCamelCase_ , num=1 )
lowercase__ = jax.random.normal(UpperCamelCase_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCamelCase_ , UpperCamelCase_ , predicted_variance=UpperCamelCase_ ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase_ , state=UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: DDPMSchedulerState , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: DDPMSchedulerState , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: jnp.ndarray , UpperCamelCase_: jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __len__( self: str ) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
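# Hedged usage sketch (assumes the class above corresponds to diffusers'
# FlaxDDPMScheduler, the name used in the error message above, and that `model`
# is a hypothetical noise-prediction function; keyword names follow the public API):
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, 50)
#     key = jax.random.PRNGKey(0)
#     sample = jax.random.normal(key, (1, 3, 32, 32), dtype=jnp.float32)
#     for t in state.timesteps:
#         model_output = model(sample, t)  # hypothetical denoiser call
#         sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)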
| 93
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :int = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
lowercase :Tuple = 1024
lowercase :Tuple = 4096
lowercase :Union[str, Any] = 24
lowercase :Tuple = 16
lowercase :Optional[Any] = [5, 11, 17, 23]
lowercase :Optional[Any] = [256, 512, 1024, 1024]
lowercase :Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowercase :Any = 768
lowercase :Optional[int] = [1, 1, 1, 0.5]
lowercase :int = [256, 512, 768, 768]
lowercase :str = 150
lowercase :Tuple = 16
lowercase :List[str] = (1, 384, 384)
lowercase :Any = False
lowercase :Optional[int] = "project"
if "ade" in checkpoint_url:
lowercase :Dict = True
lowercase :str = 768
lowercase :Tuple = [1, 1, 1, 0.5]
lowercase :Dict = 150
lowercase :Any = 16
lowercase :List[Any] = "huggingface/label-files"
lowercase :Union[str, Any] = "ade20k-id2label.json"
lowercase :List[str] = json.load(open(cached_download(hf_hub_url(lowerCamelCase, lowerCamelCase, repo_type="dataset" ) ), "r" ) )
        lowercase :Optional[Any] = {int(k): v for k, v in idalabel.items()}
lowercase :Union[str, Any] = idalabel
lowercase :Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase :List[str] = [1, 150, 480, 480]
return config, expected_shape
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :List[str] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase :Any = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
lowercase :int = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
lowercase :Any = name.replace("patch_embed", "" )
if "pos_embed" in name:
lowercase :Optional[Any] = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
lowercase :List[str] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase :str = name.replace("proj", "projection" )
if "blocks" in name:
lowercase :List[Any] = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
lowercase :str = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
lowercase :Any = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name and "backbone" not in name:
lowercase :Any = name.replace("norm1", "layernorm_before" )
if "norm2" in name and "backbone" not in name:
lowercase :Any = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
lowercase :Optional[Any] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
lowercase :Union[str, Any] = name.replace("scratch", "neck" )
if "layer1_rn" in name:
lowercase :str = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
lowercase :Dict = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
lowercase :Tuple = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
lowercase :Optional[int] = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
lowercase :Any = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase :Any = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
lowercase :str = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
lowercase :Optional[Any] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
lowercase :int = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
lowercase :Optional[int] = name.replace("conv1", "convolution1" )
if "conv2" in name:
lowercase :List[str] = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase :Tuple = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase :Any = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase :Optional[Any] = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase :str = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase :str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase :Any = name.replace("pretrained", "dpt" )
if "bn" in name:
lowercase :Optional[int] = name.replace("bn", "batch_norm" )
if "head" in name:
lowercase :Union[str, Any] = name.replace("head", "head.head" )
if "encoder.norm" in name:
lowercase :Optional[Any] = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
lowercase :str = name.replace("auxlayer", "auxiliary_head.head" )
if "backbone" in name:
lowercase :List[str] = name.replace("backbone", "backbone.bit.encoder" )
if ".." in name:
lowercase :Optional[int] = name.replace("..", "." )
if "stem.conv" in name:
lowercase :Union[str, Any] = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
lowercase :List[str] = name.replace("blocks", "layers" )
if "convolution" in name and "backbone" in name:
lowercase :int = name.replace("convolution", "conv" )
if "layer" in name and "backbone" in name:
lowercase :List[str] = name.replace("layer", "layers" )
if "backbone.bit.encoder.bit" in name:
lowercase :Dict = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
if "embedder.conv" in name:
lowercase :Dict = name.replace("embedder.conv", "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase :str = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
return name
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase :str = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
lowercase :str = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase :List[str] = in_proj_weight[: config.hidden_size, :]
lowercase :int = in_proj_bias[: config.hidden_size]
lowercase :Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase :Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase :Dict = in_proj_weight[
-config.hidden_size :, :
]
lowercase :List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ ( ):
lowercase :List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase :Any = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase , lowercase :Optional[int] = get_dpt_config(lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase :List[str] = torch.load(lowerCamelCase, map_location="cpu" )
# remove certain keys
remove_ignore_keys_(lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowercase :int = state_dict.pop(lowerCamelCase )
lowercase :Dict = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase, lowerCamelCase )
# load HuggingFace model
lowercase :int = DPTForSemanticSegmentation(lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# Check outputs on an image
lowercase :Union[str, Any] = 480 if "ade" in checkpoint_url else 384
lowercase :Optional[int] = DPTImageProcessor(size=lowerCamelCase )
lowercase :Dict = prepare_img()
lowercase :Optional[Any] = image_processor(lowerCamelCase, return_tensors="pt" )
# forward pass
lowercase :Any = model(**lowerCamelCase ).logits if "ade" in checkpoint_url else model(**lowerCamelCase ).predicted_depth
if show_prediction:
lowercase :List[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=lowerCamelCase, )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
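    # Hedged example invocation (script filename and output directory are
    # hypothetical; the checkpoint URL is the default declared above):
    #   python convert_dpt_hybrid_to_pytorch.py \
    #       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #       --pytorch_dump_folder_path ./dpt-large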
| 236
|
import numpy
# List of input, output pairs
_UpperCAmelCase : List[str] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_UpperCAmelCase : Optional[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
_UpperCAmelCase : Tuple = [2, 4, 1, 5]
_UpperCAmelCase : Union[str, Any] = len(train_data)
_UpperCAmelCase : Dict = 0.0_0_9
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase="train" ):
return calculate_hypothesis_value(lowerCamelCase, lowerCamelCase ) - output(
lowerCamelCase, lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :str = 0
for i in range(len(lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
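# Hedged worked example: for the first training pair ((5, 2, 3), 15) and the
# initial parameter_vector [2, 4, 1, 5], the hypothesis value is
# 2 + 4*5 + 1*2 + 5*3 = 39, so the initial training error is 39 - 15 = 24.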
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase=m ):
lowercase :Union[str, Any] = 0
for i in range(lowerCamelCase ):
if index == -1:
summation_value += _error(lowerCamelCase )
else:
summation_value += _error(lowerCamelCase ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :int = summation_of_cost_derivative(lowerCamelCase, lowerCamelCase ) / m
return cost_derivative_value
def UpperCAmelCase__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowercase :str = 0.000_002
lowercase :Tuple = 0
lowercase :Optional[int] = 0
while True:
j += 1
lowercase :Union[str, Any] = [0, 0, 0, 0]
for i in range(0, len(lowerCamelCase ) ):
lowercase :Dict = get_cost_derivative(i - 1 )
lowercase :Optional[Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCamelCase, lowerCamelCase, atol=lowerCamelCase, rtol=lowerCamelCase, ):
break
lowercase :Union[str, Any] = temp_parameter_vector
print(("Number of iterations:", j) )
def UpperCAmelCase__ ( ):
for i in range(len(lowerCamelCase ) ):
print(("Actual output value:", output(lowerCamelCase, "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(lowerCamelCase, "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 236
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class SCREAMING_SNAKE_CASE__ :
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
return None
class SCREAMING_SNAKE_CASE__ :
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return None
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """tf""" , 1_2 , **__a)
@require_torch
@slow
def _UpperCAmelCase ( self : str):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , """pt""" , 1_2 , **__a)
@require_torch
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
from transformers import BertModel
lowercase_ = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""") as vocab_file:
vocab_file.write("""\n""".join(__a))
vocab_file.flush()
lowercase_ = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
lowercase_ = BertModel(BertConfig(vocab_size=len(__a)))
model.save_pretrained(__a)
self._test_export(__a , """pt""" , 1_2 , __a)
@require_tf
@slow
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ = self._test_export(__a , """tf""" , 1_2 , **__a)
lowercase_ = quantize(Path(__a))
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""")
@require_torch
@slow
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ = self._test_export(__a , """pt""" , 1_2 , **__a)
lowercase_ = quantize(__a)
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""")
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Any):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ = Path(__a).joinpath("""model.onnx""")
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a)
return path
except Exception as e:
self.fail(__a)
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
from transformers import BertModel
lowercase_ = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
lowercase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
self._test_infer_dynamic_axis(__a , __a , """pt""")
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
from transformers import TFBertModel
lowercase_ = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
lowercase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
self._test_infer_dynamic_axis(__a , __a , """tf""")
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = FeatureExtractionPipeline(__a , __a)
lowercase_ = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowercase_ , lowercase_ , lowercase_ , lowercase_ = infer_shapes(__a , __a)
# Assert all variables are present
self.assertEqual(len(__a) , len(__a))
self.assertTrue(all(var_name in shapes for var_name in variable_names))
self.assertSequenceEqual(variable_names[:3] , __a)
self.assertSequenceEqual(variable_names[3:] , __a)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""})
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""})
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""})
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowercase_ = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowercase_ , lowercase_ = ensure_valid_input(FuncContiguousArgs() , __a , __a)
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a) , 3)
# Should have exactly the same input names
self.assertEqual(set(__a) , set(__a))
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]))
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ = ensure_valid_input(FuncNonContiguousArgs() , __a , __a)
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a) , 1)
self.assertEqual(len(__a) , 1)
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""])
self.assertEqual(ordered_input_names[0] , """input_ids""")
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""") , """-test""")
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix())
| 368
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = 0
if start < end:
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ , lowercase_ = _in_place_partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
count += _in_place_quick_sort(__lowerCAmelCase , __lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(__lowerCAmelCase , p + 1 , __lowerCAmelCase )
return count
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = randint(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = a[end]
lowercase_ = a[pivot]
lowercase_ = temp
lowercase_ = start - 1
for index in range(__lowerCAmelCase , __lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase_ = new_pivot_index + 1
lowercase_ = a[new_pivot_index]
lowercase_ = a[index]
lowercase_ = temp
lowercase_ = a[new_pivot_index + 1]
lowercase_ = a[end]
lowercase_ = temp
return new_pivot_index + 1, count
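# Hedged trace of the partition above: with a = [7, 2, 5, 3] and pivot value 3
# already swapped to the end, only 2 < 3 moves left of the pivot; the final swap
# yields [2, 3, 5, 7] and the function returns (1, 3): pivot index 1, 3 comparisons.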
UpperCAmelCase : Union[str, Any] = TemporaryFile()
UpperCAmelCase : Optional[int] = 100  # 100 elements are to be sorted
UpperCAmelCase , UpperCAmelCase : List[str] = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : List[str] = np.load(outfile)
UpperCAmelCase : List[Any] = len(M) - 1
UpperCAmelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 313
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
lowerCAmelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
_lowercase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
_lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the training data.'''} )
_lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} )
_lowercase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
_lowercase : int = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
_lowercase : float = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = {}
if self.train_dir is not None:
lowercase__ = self.train_dir
if self.validation_dir is not None:
lowercase__ = self.validation_dir
lowercase__ = data_files if data_files else None
@dataclass
class _a :
_lowercase : str = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase__ )} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
_lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowercase : str = field(default=UpperCamelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class _a :
def __init__( self: Optional[int] , UpperCamelCase_: Optional[Any]=192 , UpperCamelCase_: Optional[int]=32 , UpperCamelCase_: int=4 , UpperCamelCase_: List[Any]=0.6 ) -> Tuple:
"""simple docstring"""
lowercase__ = input_size
lowercase__ = mask_patch_size
lowercase__ = model_patch_size
lowercase__ = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('''Input size must be divisible by mask patch size''' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('''Mask patch size must be divisible by model patch size''' )
lowercase__ = self.input_size // self.mask_patch_size
lowercase__ = self.mask_patch_size // self.model_patch_size
lowercase__ = self.rand_size**2
lowercase__ = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = np.random.permutation(self.token_count )[: self.mask_count]
lowercase__ = np.zeros(self.token_count , dtype=UpperCamelCase_ )
lowercase__ = 1
lowercase__ = mask.reshape((self.rand_size, self.rand_size) )
lowercase__ = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
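# Hedged sanity check of the MaskGenerator defaults above: input_size=192 and
# mask_patch_size=32 give rand_size=6 and token_count=36; mask_ratio=0.6 masks
# ceil(36 * 0.6) = 22 patches, each expanded by scale = 32 // 4 = 8 along both axes.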
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = torch.stack([example['''pixel_values'''] for example in examples] )
lowercase__ = torch.stack([example['''mask'''] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ):
"""simple docstring"""
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
lowercase__ = ds['''train'''].train_test_split(data_args.train_val_split )
lowercase__ = split['''train''']
lowercase__ = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase__ = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
lowercase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(SCREAMING_SNAKE_CASE , '''decoder_type''' ):
lowercase__ = '''simmim'''
# adapt config
lowercase__ = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase__ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase__ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase__ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE )
else:
lowercase__ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase__ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase__ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase__ = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE )
if training_args.do_train:
lowercase__ = ds['''train'''].column_names
else:
lowercase__ = ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowercase__ = data_args.image_column_name
elif "image" in column_names:
lowercase__ = '''image'''
elif "img" in column_names:
lowercase__ = '''img'''
else:
lowercase__ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase__ = Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase__ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(SCREAMING_SNAKE_CASE ):
lowercase__ = [transforms(SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
lowercase__ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowercase__ = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowercase__ = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE )
# Initialize our trainer
lowercase__ = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
lowercase__ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 110
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Tuple = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Dict = emb.weight.data
return lin_layer
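# Hedged note: the helper above builds a bias-free nn.Linear whose weight is a
# copy of the token-embedding matrix, tying the output projection (vocab logits)
# to the shared input embeddings, as done below for the converted model.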
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : List[str] = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase__ : Optional[int] = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Union[str, Any] = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCamelCase__ : Optional[Any] = state_dict['decoder.embed_tokens.weight']
lowerCamelCase__ : Union[str, Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase )
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCamelCase__ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : str = parser.parse_args()
    _UpperCAmelCase : Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 50
| 0
|
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _lowerCAmelCase ( ) -> int:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=UpperCamelCase_ , default=UpperCamelCase_ , required=UpperCamelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=UpperCamelCase_ , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=UpperCamelCase_ , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=UpperCamelCase_ , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=UpperCamelCase_ , default=0 , help="""cuda_id.""" , )
__SCREAMING_SNAKE_CASE = parser.parse_args()
return args
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
if not len(UpperCamelCase_ ) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""" )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = imgs[0].size
__SCREAMING_SNAKE_CASE = Image.new("""RGB""" , size=(cols * w, rows * h) )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = grid.size
for i, img in enumerate(UpperCamelCase_ ):
grid.paste(UpperCamelCase_ , box=(i % cols * w, i // cols * h) )
return grid
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_="robotic cat with wings" , UpperCamelCase_=7.5 , UpperCamelCase_=50 , UpperCamelCase_=1 , UpperCamelCase_=42 , ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = torch.Generator(pipeline.device ).manual_seed(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = pipeline(
UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , ).images
__SCREAMING_SNAKE_CASE = int(math.sqrt(UpperCamelCase_ ) )
__SCREAMING_SNAKE_CASE = image_grid(UpperCamelCase_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__magic_name__ = parse_args()
# Load models and create wrapper for stable diffusion
__magic_name__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
__magic_name__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
__magic_name__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
__magic_name__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
__magic_name__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__magic_name__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
__magic_name__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
__magic_name__ = unet.to(torch.device("cuda", args.cuda_id))
__magic_name__ = pipeline.to(unet.device)
__magic_name__, __magic_name__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
__magic_name__ = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 358
|
"""simple docstring"""
from string import ascii_uppercase
__magic_name__ = {str(ord(c) - 55): c for c in ascii_uppercase}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""int() can't convert non-string with explicit base""" )
if num < 0:
raise ValueError("""parameter must be positive int""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 36:
raise ValueError("""base must be <= 36""" )
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while div != 1:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = divmod(UpperCamelCase_ , UpperCamelCase_ )
if base >= 11 and 9 < mod < 36:
__SCREAMING_SNAKE_CASE = ALPHABET_VALUES[str(UpperCamelCase_ )]
else:
__SCREAMING_SNAKE_CASE = str(UpperCamelCase_ )
new_value += actual_value
__SCREAMING_SNAKE_CASE = num // base
__SCREAMING_SNAKE_CASE = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(UpperCamelCase_ )
return str(new_value[::-1] )
return new_value[::-1]
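# Hedged worked example: decimal_to_any(255, 16) computes divmod(255, 16) = (15, 15)
# -> 'F', then divmod(15, 16) = (0, 15) -> 'F', and returns 'FF' (i.e. 0xFF).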
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 255
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 88
|
import os
def A_ ( a = "matrix.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(a ) , a ) ) as in_file:
SCREAMING_SNAKE_CASE_ : Dict = in_file.read()
SCREAMING_SNAKE_CASE_ : Dict = [[int(a ) for cell in row.split(',' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE_ : str = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE_ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE_ : Any = [[0 for i in range(a )] for j in range(a )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = grid[0][0]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Dict = grid[i][0] + dp[i - 1][0]
for i in range(1 , a ):
for j in range(1 , a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
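# Hedged worked example: for the 2x2 grid [[1, 3], [2, 4]] the DP table is
# [[1, 4], [3, 7]], so the minimal right/down path sum is 7 (1 -> 2 -> 4).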
if __name__ == "__main__":
print(F'{solution() = }')
| 253
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CTRLTokenizer
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: Optional[Any] =["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCamelCase__: Optional[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: Union[str, Any] =["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCamelCase__: Union[str, Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : List[str] , **UpperCAmelCase_ : str) ->List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int ="adapt react readapt apt"
lowerCamelCase__: int ="adapt react readapt apt"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowerCamelCase__: Union[str, Any] ="adapt react readapt apt"
lowerCamelCase__: Tuple ="adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCamelCase__: Dict =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[str] =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 273
| 1
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
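# The JIT-equivalence check in test_jit_compilation above, reduced to a toy
# function (illustrative sketch only; not part of the original test):
#
#     import jax
#     import jax.numpy as jnp
#
#     @jax.jit
#     def f(x):
#         return (x * 2 + 1,)
#
#     x = jnp.ones((2, 3))
#     jitted = f(x)
#     with jax.disable_jit():
#         eager = f(x)
#     assert all(j.shape == e.shape for j, e in zip(jitted, eager))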
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count the hybrid integers p**q * q**p <= base**degree for distinct primes p < q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
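    # Sanity check of the log identity behind the two-pointer scan above:
    # p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base).
    # For the smallest primes p, q = 2, 3 the hybrid integer is 2**3 * 3**2 = 72.
    assert abs((3 * log2(2) + 2 * log2(3)) - log2(72)) < 1e-9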
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate the real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate the reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
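    # Worked example: 100 VA of apparent power at power factor 0.8 splits into
    # 80 W of real power and ~60 VAR of reactive power (sqrt(1 - 0.8**2) = 0.6).
    print(real_power(100, 0.8))  # 80.0
    print(reactive_power(100, 0.8))  # ~60.0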
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search for the smallest index in v[l:r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value to keep candidate tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
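    # Example: the longest strictly increasing subsequence of
    # [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], so length 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6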
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
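# Toy behaviour of the reconstructed padding_tensor above (values illustrative):
#
#     padding_tensor([[1, 2], [3]], -1, "right", 4)
#     # -> [[1, 2, -1, -1], [3, -1, -1, -1]]
#     padding_tensor([[(0, 1)], [(2, 3), (4, 5)]], (-1, -1), "right", 3)
#     # -> [[[0, 1], [-1, -1], [-1, -1]], [[2, 3], [4, 5], [-1, -1]]]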
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
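# The two-stage flow exercised above, in outline (model IDs are the ones used in
# the slow test; everything else is an illustrative sketch):
#
#     pipe_prior = KandinskyPriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
#     )
#     pipe = KandinskyImg2ImgPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
#     )
#     image_embeds, negative_image_embeds = pipe_prior(prompt).to_tuple()
#     out = pipe(prompt, image=init_image, image_embeds=image_embeds,
#                negative_image_embeds=negative_image_embeds, strength=0.2)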
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
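# How the lazy pattern above behaves (simplified sketch of transformers'
# _LazyModule; the body here is an assumption, not the real implementation):
#
#     class _LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._find_submodule(name)  # look up _import_structure
#             module = importlib.import_module(f"{self.__name__}.{submodule}")
#             return getattr(module, name)
#
# so `from transformers.models.jukebox import JukeboxModel` only pays the heavy
# torch import cost when the attribute is actually accessed.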
| 362
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original block number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare the image of two cats used to verify the conversion results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights to our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
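# Worked example of replace_key_with_offset as invoked from rename_keys above:
#
#     replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#     # -> "poolformer.encoder.block.1.0.output.conv1.weight"
#
# i.e. block index 2 is shifted down by the patch-embedding offset 1 and the
# layer name is swapped, while the rest of the key is preserved.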
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
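# What the shape assertions above boil down to (illustrative sketch; the size
# dict matches the tester defaults):
#
#     processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
#     pixel_values = processor(images, return_tensors="pt").pixel_values
#     # a single image     -> shape (1, 3, 18, 18)
#     # a list of n images -> shape (n, 3, 18, 18)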
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : Optional[int], lowerCAmelCase : int = 32, lowerCAmelCase : int = 64, lowerCAmelCase : int = 20, lowerCAmelCase : int = 768, lowerCAmelCase : Optional[Any]=77, lowerCAmelCase : Tuple=4, lowerCAmelCase : float = 0.0, lowerCAmelCase : str = "silu", lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "linear", lowerCAmelCase : Optional[str] = "prd", lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, ) -> List[Any]:
super().__init__()
lowercase : List[Any] = num_attention_heads
lowercase : int = attention_head_dim
lowercase : List[Any] = num_attention_heads * attention_head_dim
lowercase : Tuple = additional_embeddings
lowercase : Dict = time_embed_dim or inner_dim
lowercase : Optional[Any] = embedding_proj_dim or embedding_dim
lowercase : int = clip_embed_dim or embedding_dim
lowercase : List[str] = Timesteps(lowerCAmelCase, lowerCAmelCase, 0 )
lowercase : List[str] = TimestepEmbedding(lowerCAmelCase, lowerCAmelCase, out_dim=lowerCAmelCase, act_fn=lowerCAmelCase )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if embedding_proj_norm_type is None:
lowercase : str = None
elif embedding_proj_norm_type == "layer":
lowercase : Tuple = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if encoder_hid_proj_type is None:
lowercase : Optional[int] = None
elif encoder_hid_proj_type == "linear":
lowercase : Dict = nn.Linear(lowerCAmelCase, lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowercase : Dict = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCAmelCase ) )
if added_emb_type == "prd":
lowercase : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCAmelCase ) )
elif added_emb_type is None:
lowercase : str = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowercase : Dict = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dropout=lowerCAmelCase, activation_fn='gelu', attention_bias=lowerCAmelCase, )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
lowercase : str = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
lowercase : Optional[int] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowercase : int = nn.LayerNorm(lowerCAmelCase )
lowercase : str = nn.Linear(lowerCAmelCase, lowerCAmelCase )
lowercase : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
lowercase : List[str] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask', lowerCAmelCase, persistent=lowerCAmelCase )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : Tuple ) -> Dict[str, AttentionProcessor]:
lowercase : Any = {}
def fn_recursive_add_processors(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
lowercase : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
return processors
def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
lowercase : str = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Union[str, Any] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Union[torch.Tensor, float, int], lowerCAmelCase : torch.FloatTensor, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[torch.BoolTensor] = None, lowerCAmelCase : bool = True, ) -> List[Any]:
lowercase : Optional[Any] = hidden_states.shape[0]
lowercase : Union[str, Any] = timestep
if not torch.is_tensor(lowerCAmelCase ):
lowercase : List[str] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] = timesteps * torch.ones(lowerCAmelCase, dtype=timesteps.dtype, device=timesteps.device )
lowercase : Dict = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype )
lowercase : Any = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
lowercase : Any = self.embedding_proj_norm(lowerCAmelCase )
lowercase : List[str] = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase : str = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase : Optional[Any] = self.proj_in(lowerCAmelCase )
lowercase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
lowercase : Dict = []
lowercase : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase : Union[str, Any] = hidden_states[:, None, :]
lowercase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase, -1, -1 )
additional_embeds.append(lowerCAmelCase )
lowercase : Union[str, Any] = torch.cat(
lowerCAmelCase, dim=1, )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase : List[Any] = F.pad(
lowerCAmelCase, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
lowercase : str = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowercase : List[Any] = F.pad(lowerCAmelCase, (0, self.additional_embeddings), value=0.0 )
lowercase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
lowercase : List[Any] = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
lowercase : Tuple = block(lowerCAmelCase, attention_mask=lowerCAmelCase )
lowercase : Optional[Any] = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
lowercase : Optional[Any] = hidden_states[:, -1]
else:
lowercase : Any = hidden_states[:, additional_embeddings_len:]
lowercase : Optional[int] = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
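# Standalone sketch of the additive causal mask registered in __init__ above:
#
#     n = num_embeddings + additional_embeddings
#     mask = torch.full([n, n], -10000.0)
#     mask.triu_(1)  # 0.0 on/below the diagonal, -10000.0 strictly above
#
# Added to the attention logits, the -10000.0 entries effectively zero out
# (after softmax) any attention to future positions.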
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__A : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
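# Example invocation (all paths are placeholders):
#
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path model.ckpt \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin \
#         --base_model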
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
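# The greedy-generation behaviour pinned by the slow test above, as a usage
# sketch (checkpoint name and prompt come from the test itself):
#
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     model = CTRLLMHeadModel.from_pretrained("ctrl")
#     input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)
#     tokenizer.decode(output_ids[0])
#     # -> "Legal the president is a good guy and I don't want to lose my job. ..."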
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
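

# Quick sanity check (added for illustration; the key below is a made-up but
# representative GroupViT checkpoint entry, not taken from a real checkpoint):
assert rename_key("img_encoder.layers.0.blocks.1.attn.proj.weight") == (
    "vision_model.encoder.stages.0.layers.1.self_attn.out_proj.weight"
)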
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            # (target names follow the rename scheme in rename_key above)
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
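

# Shape sanity check (added for illustration): a fused qkv weight of shape
# (3 * dim, dim) splits into three (dim, dim) matrices, mirroring the slicing
# performed in convert_state_dict above.
_dim = 4
_fused = torch.zeros(3 * _dim, _dim)
assert _fused[:_dim, :].shape == (_dim, _dim)
assert _fused[_dim : _dim * 2, :].shape == (_dim, _dim)
assert _fused[-_dim:, :].shape == (_dim, _dim)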
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 357
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=sequences , )
| 324
| 0
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
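

# Added example: row 0 of ``initial_grid`` above already contains a 3, so a
# second 3 at (0, 1) is rejected, while a 1 passes the row/column/box checks.
assert is_safe(initial_grid, 0, 1, 1)
assert not is_safe(initial_grid, 0, 1, 3)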
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 19
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
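# Added note: with the ``_LazyModule`` pattern above, importing the package is
# cheap; the heavy submodules listed in ``_import_structure`` are loaded only
# the first time one of their attributes is accessed, e.g.:
#
#   from transformers.models.gpt_neox import GPTNeoXConfig  # triggers the real import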
| 31
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362
|
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of ``Node`` objects; ``idx_of_element`` maps each node to its
    position in ``heap`` so updates avoid a linear search.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]] = idx
                self.idx_of_element[array[smallest]] = smallest
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] = p
            self.idx_of_element[self.heap[idx]] = idx
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] = 0
        self.idx_of_element[self.heap[-1]] = len(self.heap) - 1
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
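

# Added note: ``build_heap`` runs in O(n) by sifting down from the last parent,
# while ``insert`` / ``remove`` / ``decrease_key`` are O(log n) thanks to the
# ``idx_of_element`` map that finds a node's position without a linear scan.
# A tiny standalone check of that behaviour:
_demo_heap = MinHeap([Node("p", 5), Node("q", 2), Node("r", 9)])
assert _demo_heap.peek().val == 2
assert _demo_heap.remove().val == 2 and _demo_heap.peek().val == 5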
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 0
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
    def test_full_tokenizer_no_lower(self):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_full_tokenizer_moses(self):
"""simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 61
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort ``list_of_ints`` in place with LSD radix sort and return it."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
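

# Added usage example: radix sort is stable and runs in O(d * (n + RADIX)),
# where d is the digit count of the largest value.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]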
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 0
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
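

# Added numerical sanity check: uniform logits give a uniform softmax, so the
# entropy equals log(num_classes).
assert torch.allclose(entropy(torch.zeros(1, 4)), torch.log(torch.tensor(4.0)))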
class DeeBertEncoder(nn.Module):
'''simple docstring'''
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
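
    # Illustrative sketch (added; ``encoder``, ``mask`` and ``head_mask`` are
    # hypothetical placeholders): with a per-layer entropy threshold set, a
    # confident intermediate layer aborts the forward pass via HighwayException.
    #
    #   encoder.set_early_exit_entropy(0.5)          # same threshold for every layer
    #   try:
    #       outputs = encoder(hidden_states, attention_mask=mask, head_mask=head_mask)
    #   except HighwayException as early:
    #       logits = early.message[0]                # logits from the exiting highway
    #       exit_layer = early.exit_layer            # 1-based layer index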
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case__ , )
class DeeBertModel(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
'''simple docstring'''
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
'''simple docstring'''
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case__ , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 366
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 233
| 0
|
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
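

# Added worked example: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors
# (1, 2, 4, 7, 14 and 28).
assert count_divisors(28) == 6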
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 297
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a graph, implemented iteratively."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 94
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 199
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    model_type = 'roc_bert'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
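

# Usage sketch (added; follows the generic ``PretrainedConfig`` workflow rather
# than anything specific to this file):
#
#   config = RoCBertConfig(pronunciation_vocab_size=1024)
#   assert config.model_type == "roc_bert"
#   config.save_pretrained("./roc_bert_config")  # writes config.json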
| 199
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
'''simple docstring'''
super().setUp()
A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
A__ = '<s>'
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # the tokenizer has no padding token, so the common padding test does not apply
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8,
            259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277,
            258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258,
            289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106,
            75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93,
            176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
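# A minimal usage sketch for the tokenizer exercised above (network access and
# the `sentencepiece` extra are assumed; the sample text is illustrative):
#
#     from transformers import ReformerTokenizer
#
#     tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tok.encode("Hello World!")
#     print(ids)             # [126, 32, 262, 152, 38, 72, 287], per the test above
#     print(tok.decode(ids))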
""" PyTorch ResNet model."""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet's residual layer composed by two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet's bottleneck layer composed by three convolutions. The first `1x1` convolution reduces the
    channels by a factor of `reduction` to make the middle `3x3` convolution faster; the last `1x1` convolution
    remaps the reduced features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed by stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
            The feature maps of the requested stages.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
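# A minimal usage sketch for the classification model above (checkpoint name and
# network access assumed; any RGB image works, the local filename is hypothetical):
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     from PIL import Image
#     import torch
#
#     image = Image.open("cat.png")
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])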
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
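# The try/except above is diffusers' optional-dependency pattern: when the
# guarded imports succeed the real pipelines are exported, otherwise dummy
# objects that raise a helpful error at instantiation time take their place.
# A minimal usage sketch (the model id is the commonly used UnCLIP checkpoint,
# stated here as an assumption, and network access is required):
#
#     import torch
#     from diffusers import UnCLIPPipeline
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
#     image = pipe("a photo of a red panda").images[0]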
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
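# A small sketch of how `require_version` behaves: it parses a "pkg>=x.y" style
# requirement, looks up the installed version, and raises with the optional hint
# appended on mismatch. The requirement string below is illustrative:
require_version("tqdm>=4.27", "Try: pip install --upgrade tqdm")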
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
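# Why the (3, 2, 0, 1) transpose above: Flax stores convolution kernels as
# (height, width, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, height, width). A standalone sketch of the layout
# conversion (the 3x3x16x32 shape is illustrative):
_hwio_kernel = np.zeros((3, 3, 16, 32))  # Flax layout: H, W, I, O
_oihw_kernel = np.transpose(_hwio_kernel, (3, 2, 0, 1))  # PyTorch layout: O, I, H, W
assert _oihw_kernel.shape == (32, 16, 3, 3)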
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Lazily assign ``val`` to every position in ``[a, b]`` (1-based)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of ``[a, b]`` (1-based), applying pending lazy updates."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
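# A quick cross-check of lazy range assignment against a plain list (a sketch;
# positions are 1-based to match the API above):
if __name__ == "__main__":
    data = [5, 1, 9, 2]
    st = SegmentTree(4)
    st.build(1, 1, 4, data)
    st.update(1, 1, 4, 2, 3, 7)  # assign 7 to positions 2..3 -> [5, 7, 7, 2]
    assert st.query(1, 1, 4, 1, 4) == 7
    assert st.query(1, 1, 4, 4, 4) == 2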
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by adding eos to the end. No bos token is added to the front."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
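# A minimal usage sketch for the tokenizer above (checkpoint name and network
# access assumed). Per `build_inputs_with_special_tokens`, eos is appended and
# no bos is used:
#
#     from transformers import PegasusTokenizerFast
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Summarize this.").input_ids
#     print(ids[-1] == tok.eos_token_id)  # True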
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho, or None."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
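# A hedged helper built on top of ``pollard_rho`` (illustrative, not part of the
# original module): repeatedly split composites to get a full factorization.
def factor(n: int, attempts: int = 5) -> list[int]:
    """Return the prime factors of ``n``, probabilistically, via Pollard's rho."""
    if n < 2:
        return []
    divisor = pollard_rho(n, attempts=attempts)
    if divisor is None:
        # No divisor found within the attempt budget: treat ``n`` as prime.
        return [n]
    return sorted(factor(divisor, attempts) + factor(n // divisor, attempts))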
if __name__ == "__main__":
import argparse
__snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
__snake_case : List[str] = parser.parse_args()
__snake_case : List[Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
__snake_case : Any = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset

BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
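# The sortish sampler exercised above reduces padding by grouping examples of
# similar length while keeping some shuffling. A self-contained sketch of the
# core idea (function name and chunking factor are illustrative):
def sortish_indices(lengths, batch_size, chunk_factor=50):
    """Shuffle indices, then sort by length within coarse chunks."""
    import random

    idxs = list(range(len(lengths)))
    random.shuffle(idxs)  # retain epoch-to-epoch randomness
    chunk = batch_size * chunk_factor
    chunks = [idxs[i : i + chunk] for i in range(0, len(idxs), chunk)]
    return [j for c in chunks for j in sorted(c, key=lambda j: lengths[j], reverse=True)]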
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys ( state_dict ):
    ignore_keys = [
        """module.v.head.weight""",
        """module.v.head.bias""",
        """module.v.head_dist.weight""",
        """module.v.head_dist.bias""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
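# Main entry point: download the original checkpoint, remap its weights, load them
# into ASTForAudioClassification, and verify the logits on a reference audio clip.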
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name : str , pytorch_dump_folder_path : str , push_to_hub : bool=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
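    # the feature extractor normalizes log-mel spectrograms with dataset-level statistics,
    # hence the different mean/std (and max_length) for AudioSet vs. Speech Commands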
    mean = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
    std = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
    max_length = 1024 if """speech-commands""" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
__lowercase : Optional[int] = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__lowercase : Union[str, Any] = dataset[0]["""audio"""]["""array"""]
else:
__lowercase : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__lowercase , __lowercase : Union[str, Any] = torchaudio.load(lowerCAmelCase_ )
__lowercase : Union[str, Any] = waveform.squeeze().numpy()
__lowercase : int = feature_extractor(lowerCAmelCase_ , sampling_rate=16000 , return_tensors="""pt""" )
# forward pass
__lowercase : Tuple = model(**lowerCAmelCase_ )
__lowercase : int = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__lowercase : Union[str, Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__lowercase : Optional[int] = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__lowercase : Union[str, Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__lowercase : Optional[Any] = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__lowercase : List[Any] = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__lowercase : str = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__lowercase : List[str] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__lowercase : List[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
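# Illustrative invocation (script filename assumed):
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 --pytorch_dump_folder_path ./ast-dump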
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_UpperCAmelCase : Any = logging.getLogger(__name__)
def accuracy( out, labels ):
    lowercase :str = np.argmax(out, axis=1 )
return np.sum(outputs == labels )
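# Each ROCStories CSV row holds four story sentences, two candidate endings and a
# 1-based label; the loader joins the story sentences and shifts the label to 0-based.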
def load_rocstories_dataset( dataset_path ):
    with open(dataset_path, encoding="utf_8" ) as f:
lowercase :List[Any] = csv.reader(lowerCamelCase )
lowercase :List[str] = []
next(lowerCamelCase ) # skip the first line
for line in tqdm(lowerCamelCase ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
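# Every example is encoded twice for the double-heads model, once per candidate
# ending, as "[start] story [delimiter] ending [clf]"; mc_token_ids marks the [clf]
# position and lm_labels entries of -100 are ignored by the LM loss.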
def pre_process_datasets( encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token ):
lowercase :List[Any] = []
for dataset in encoded_datasets:
        lowercase :Tuple = len(dataset )
lowercase :List[str] = np.zeros((n_batch, 2, input_len), dtype=np.intaa )
lowercase :Dict = np.zeros((n_batch, 2), dtype=np.intaa )
lowercase :Dict = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.intaa )
lowercase :Tuple = np.zeros((n_batch,), dtype=np.intaa )
        for (
            i,
            (story, conta, contb, mc_label),
        ) in enumerate(dataset ):
            lowercase :str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowercase :int = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            lowercase :Dict = with_conta
            lowercase :List[Any] = with_contb
            lowercase :List[str] = len(with_conta ) - 1
            lowercase :List[str] = len(with_contb ) - 1
            lowercase :Dict = with_conta
            lowercase :Any = with_contb
lowercase :Optional[Any] = mc_label
lowercase :List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
return tensor_datasets
def main( ):
lowercase :List[Any] = argparse.ArgumentParser()
parser.add_argument("--model_name", type=lowerCamelCase, default="openai-gpt", help="pretrained model name" )
parser.add_argument("--do_train", action="store_true", help="Whether to run training." )
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir", default=lowerCamelCase, type=lowerCamelCase, required=lowerCamelCase, help="The output directory where the model predictions and checkpoints will be written.", )
parser.add_argument("--train_dataset", type=lowerCamelCase, default="" )
parser.add_argument("--eval_dataset", type=lowerCamelCase, default="" )
parser.add_argument("--seed", type=lowerCamelCase, default=42 )
parser.add_argument("--num_train_epochs", type=lowerCamelCase, default=3 )
parser.add_argument("--train_batch_size", type=lowerCamelCase, default=8 )
parser.add_argument("--eval_batch_size", type=lowerCamelCase, default=16 )
parser.add_argument("--adam_epsilon", default=1e-8, type=lowerCamelCase, help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm", type=lowerCamelCase, default=1 )
parser.add_argument(
"--max_steps", default=-1, type=lowerCamelCase, help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
), )
parser.add_argument(
"--gradient_accumulation_steps", type=lowerCamelCase, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
parser.add_argument("--learning_rate", type=lowerCamelCase, default=6.25e-5 )
parser.add_argument("--warmup_steps", default=0, type=lowerCamelCase, help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule", type=lowerCamelCase, default="warmup_linear" )
parser.add_argument("--weight_decay", type=lowerCamelCase, default=0.01 )
parser.add_argument("--lm_coef", type=lowerCamelCase, default=0.9 )
parser.add_argument("--n_valid", type=lowerCamelCase, default=374 )
parser.add_argument("--server_ip", type=lowerCamelCase, default="", help="Can be used for distant debugging." )
parser.add_argument("--server_port", type=lowerCamelCase, default="", help="Can be used for distant debugging." )
lowercase :Optional[int] = parser.parse_args()
print(lowerCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=lowerCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowercase :str = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowercase :Dict = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(lowerCamelCase, lowerCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowercase :Optional[Any] = ["_start_", "_delimiter_", "_classify_"]
lowercase :Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCamelCase )
lowercase :List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase )
lowercase :List[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCamelCase ) )
model.to(lowerCamelCase )
# Load and encode the datasets
def tokenize_and_encode(lowerCamelCase ):
if isinstance(lowerCamelCase, lowerCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase ) )
elif isinstance(lowerCamelCase, lowerCamelCase ):
return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("Encoding dataset..." )
lowercase :List[Any] = load_rocstories_dataset(args.train_dataset )
lowercase :str = load_rocstories_dataset(args.eval_dataset )
lowercase :Optional[Any] = (train_dataset, eval_dataset)
lowercase :Optional[int] = tokenize_and_encode(lowerCamelCase )
# Compute the max input length for the Transformer
lowercase :List[Any] = model.config.n_positions // 2 - 2
    lowercase :List[Any] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ), len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
lowercase :Optional[Any] = min(lowerCamelCase, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowercase :str = pre_process_datasets(lowerCamelCase, lowerCamelCase, lowerCamelCase, *lowerCamelCase )
lowercase , lowercase :Any = tensor_datasets[0], tensor_datasets[1]
lowercase :Tuple = TensorDataset(*lowerCamelCase )
lowercase :List[str] = RandomSampler(lowerCamelCase )
lowercase :Union[str, Any] = DataLoader(lowerCamelCase, sampler=lowerCamelCase, batch_size=args.train_batch_size )
lowercase :str = TensorDataset(*lowerCamelCase )
lowercase :List[str] = SequentialSampler(lowerCamelCase )
lowercase :List[str] = DataLoader(lowerCamelCase, sampler=lowerCamelCase, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowercase :Any = args.max_steps
lowercase :Dict = args.max_steps // (len(lowerCamelCase ) // args.gradient_accumulation_steps) + 1
else:
lowercase :Optional[int] = len(lowerCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
lowercase :List[str] = list(model.named_parameters() )
lowercase :int = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowercase :List[Any] = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowercase :Optional[Any] = AdamW(lowerCamelCase, lr=args.learning_rate, eps=args.adam_epsilon )
lowercase :Tuple = get_linear_schedule_with_warmup(
lowerCamelCase, num_warmup_steps=args.warmup_steps, num_training_steps=lowerCamelCase )
if args.do_train:
lowercase , lowercase , lowercase :Any = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc="Epoch" ):
lowercase :str = 0
lowercase :List[str] = 0
lowercase :Tuple = tqdm(lowerCamelCase, desc="Training" )
for step, batch in enumerate(lowerCamelCase ):
lowercase :Dict = tuple(t.to(lowerCamelCase ) for t in batch )
lowercase , lowercase , lowercase , lowercase :Tuple = batch
lowercase :List[str] = model(lowerCamelCase, mc_token_ids=lowerCamelCase, lm_labels=lowerCamelCase, mc_labels=lowerCamelCase )
lowercase :Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowercase :Any = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowercase :List[Any] = "Training loss: {:.2e} lr: {:.2e}".format(lowerCamelCase, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        lowercase :Tuple = model.module if hasattr(model, "module" ) else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
        lowercase :Any = os.path.join(args.output_dir, WEIGHTS_NAME )
        lowercase :Optional[int] = os.path.join(args.output_dir, CONFIG_NAME )
torch.save(model_to_save.state_dict(), lowerCamelCase )
model_to_save.config.to_json_file(lowerCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowercase :Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowercase :Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCamelCase )
if args.do_eval:
model.eval()
lowercase , lowercase :Optional[Any] = 0, 0
lowercase , lowercase :Optional[Any] = 0, 0
for batch in tqdm(lowerCamelCase, desc="Evaluating" ):
lowercase :str = tuple(t.to(lowerCamelCase ) for t in batch )
lowercase , lowercase , lowercase , lowercase :str = batch
with torch.no_grad():
lowercase , lowercase , lowercase , lowercase :List[Any] = model(
lowerCamelCase, mc_token_ids=lowerCamelCase, lm_labels=lowerCamelCase, mc_labels=lowerCamelCase )
lowercase :Optional[Any] = mc_logits.detach().cpu().numpy()
lowercase :List[Any] = mc_labels.to("cpu" ).numpy()
lowercase :Optional[int] = accuracy(lowerCamelCase, lowerCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowercase :List[Any] = eval_loss / nb_eval_steps
lowercase :Any = eval_accuracy / nb_eval_examples
lowercase :Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowercase :Dict = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowercase :Any = os.path.join(args.output_dir, "eval_results.txt" )
with open(lowerCamelCase, "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s", lowerCamelCase, str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
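# Helper that builds a small random VideoMAE config and inputs so the shared
# ModelTesterMixin tests stay fast.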
class __lowerCAmelCase :
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
lowercase :Dict = parent
lowercase :Optional[int] = batch_size
lowercase :List[Any] = image_size
lowercase :int = num_channels
lowercase :Any = patch_size
lowercase :str = tubelet_size
lowercase :Optional[Any] = num_frames
lowercase :Optional[Any] = is_training
lowercase :Tuple = use_labels
lowercase :Union[str, Any] = hidden_size
lowercase :Any = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :Optional[int] = intermediate_size
lowercase :Union[str, Any] = hidden_act
lowercase :int = hidden_dropout_prob
lowercase :List[str] = attention_probs_dropout_prob
lowercase :List[str] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Optional[Any] = mask_ratio
lowercase :List[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase :List[str] = (image_size // patch_size) ** 2
lowercase :Dict = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase :Optional[Any] = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :Tuple = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase :Dict = None
if self.use_labels:
lowercase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self: List[str] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ):
lowercase :List[Any] = VideoMAEModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :int = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Optional[int] ):
lowercase :str = VideoMAEForPreTraining(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase :Tuple = torch.ones((self.num_masks,) )
lowercase :str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase :Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase :List[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
# model only returns predictions for masked patches
lowercase :Any = mask.sum().item()
lowercase :Optional[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Union[str, Any] = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase :str = config_and_inputs
lowercase :Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
_a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_a = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase :str = VideoMAEModelTester(self )
lowercase :str = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any]=False ):
lowercase :Union[str, Any] = copy.deepcopy(_lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase :Tuple = torch.ones((self.model_tester.num_masks,) )
lowercase :Optional[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase :List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase :Optional[int] = bool_masked_pos.to(_lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self: str ):
pass
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase , lowercase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Union[str, Any] = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase :List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Tuple = model_class(_lowerCAmelCase )
lowercase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Optional[int] = [*signature.parameters.keys()]
lowercase :List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self: Any ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase :int = VideoMAEModel.from_pretrained(model_name )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
if not self.has_attentions:
pass
else:
lowercase , lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :Optional[Any] = True
for model_class in self.all_model_classes:
lowercase :Tuple = self.model_tester.seq_length - self.model_tester.num_masks
lowercase :Dict = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase :Any = True
lowercase :Tuple = False
lowercase :str = True
lowercase :List[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase :List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase :Any = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase :Optional[Any] = True
lowercase :str = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase :Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase :List[Any] = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase :int = len(_lowerCAmelCase )
# Check attention is always last and order is fine
lowercase :int = True
lowercase :Union[str, Any] = True
lowercase :int = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase :Optional[int] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) )
lowercase :Tuple = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self: str ):
def check_hidden_states_output(_lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple ):
lowercase :Dict = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase :Tuple = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase :Tuple = outputs.hidden_states
lowercase :Any = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
lowercase :str = self.model_tester.seq_length - self.model_tester.num_masks
lowercase :List[str] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Any = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase :Optional[Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
pass
def UpperCAmelCase__ ( ):
lowercase :str = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" )
lowercase :List[str] = np.load(lowerCamelCase )
return list(lowerCamelCase )
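# The integration tests below run real checkpoints on this short video clip and
# compare a slice of the logits against precomputed reference values.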
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self: Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
_lowerCAmelCase )
lowercase :Tuple = self.default_image_processor
lowercase :Optional[Any] = prepare_video()
lowercase :str = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase :List[str] = model(**_lowerCAmelCase )
# verify the logits
lowercase :Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase :Optional[int] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :List[str] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_lowerCAmelCase )
lowercase :List[Any] = self.default_image_processor
lowercase :str = prepare_video()
lowercase :Optional[int] = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# add boolean mask, indicating which patches to mask
lowercase :Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowercase :str = torch.load(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase :Optional[Any] = model(**_lowerCAmelCase )
# verify the logits
lowercase :str = torch.Size([1, 14_08, 15_36] )
lowercase :Union[str, Any] = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=_lowerCAmelCase )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase :Union[str, Any] = torch.tensor([0.51_42] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase :Any = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_lowerCAmelCase ).to(
_lowerCAmelCase )
with torch.no_grad():
lowercase :List[str] = model(**_lowerCAmelCase )
        lowercase :Tuple = torch.tensor([0.64_69] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    _lowerCamelCase : List[str] ={}
    with open(path ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCamelCase : List[str] =[]
_list.append([line.split()[1], line.split()[2]] )
_lowerCamelCase : Union[str, Any] =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCamelCase : int =[]
_list.append([line.split()[0], line.split()[2]] )
_lowerCamelCase : Tuple =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
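# Greedy construction of an initial tour: starting from the first node in the file,
# repeatedly move to the cheapest not-yet-visited neighbour, then return to the start.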
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
_lowerCamelCase : Any =f.read(1 )
_lowerCamelCase : str =start_node
_lowerCamelCase : Optional[Any] =[]
_lowerCamelCase : List[str] =start_node
_lowerCamelCase : int =0
while visiting not in first_solution:
_lowerCamelCase : Optional[int] =10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution:
_lowerCamelCase : Dict =k[1]
_lowerCamelCase : Tuple =k[0]
first_solution.append(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[Any] =distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Union[str, Any] =best_node
first_solution.append(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Optional[int] =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCamelCase : str =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
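# The neighbourhood of a tour is every tour obtained by swapping two interior nodes;
# each candidate gets its total distance appended as the last element, and the list
# is sorted by that distance so index 0 holds the best neighbour.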
def find_neighborhood( solution , dict_of_neighbours ):
    '''simple docstring'''
_lowerCamelCase : Any =[]
for n in solution[1:-1]:
        _lowerCamelCase : int =solution.index(n )
        for kn in solution[1:-1]:
            _lowerCamelCase : str =solution.index(kn )
if n == kn:
continue
_lowerCamelCase : Union[str, Any] =copy.deepcopy(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : str =kn
_lowerCamelCase : Optional[int] =n
_lowerCamelCase : Union[str, Any] =0
for k in _tmp[:-1]:
                _lowerCamelCase : Dict =_tmp[_tmp.index(k ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCamelCase : Optional[Any] =distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCamelCase : Union[str, Any] =len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
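# Core tabu search loop: move to the best neighbour whose swap is not tabu, record
# the swapped pair in a fixed-size tabu list, and track the best tour seen overall.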
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] =1
_lowerCamelCase : Optional[Any] =first_solution
_lowerCamelCase : Tuple =[]
_lowerCamelCase : Optional[int] =distance_of_first_solution
_lowerCamelCase : str =solution
while count <= iters:
_lowerCamelCase : List[Any] =find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[str] =0
_lowerCamelCase : Union[str, Any] =neighborhood[index_of_best_solution]
_lowerCamelCase : Union[str, Any] =len(SCREAMING_SNAKE_CASE__ ) - 1
_lowerCamelCase : List[str] =False
while not found:
_lowerCamelCase : str =0
while i < len(SCREAMING_SNAKE_CASE__ ):
if best_solution[i] != solution[i]:
_lowerCamelCase : List[str] =best_solution[i]
_lowerCamelCase : int =solution[i]
break
_lowerCamelCase : Optional[int] =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCamelCase : Tuple =True
_lowerCamelCase : int =best_solution[:-1]
_lowerCamelCase : List[Any] =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCamelCase : int =cost
_lowerCamelCase : Tuple =solution
else:
_lowerCamelCase : List[str] =index_of_best_solution + 1
_lowerCamelCase : Any =neighborhood[index_of_best_solution]
if len(SCREAMING_SNAKE_CASE__ ) >= size:
tabu_list.pop(0 )
_lowerCamelCase : Tuple =count + 1
return best_solution_ever, best_cost
def main( args=None ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] =generate_neighbours(args.File )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] =generate_first_solution(
args.File , SCREAMING_SNAKE_CASE__ )
_lowerCamelCase , _lowerCamelCase : str =tabu_search(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = '▁'
lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
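# These tests run against a tiny fixture SentencePiece model rather than the full
# BigBird vocabulary, so they stay fast and hermetic.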
@require_sentencepiece
@require_tokenizers
class A ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Tuple =BigBirdTokenizer
UpperCamelCase__ : Union[str, Any] =BigBirdTokenizerFast
UpperCamelCase__ : Any =True
UpperCamelCase__ : Optional[Any] =True
def lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
_lowerCamelCase : List[Any] =self.tokenizer_class(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : List[Any] ='<s>'
_lowerCamelCase : Optional[Any] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def lowerCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(lowercase_ ) , 1004 )
def lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Union[str, Any] =self.get_tokenizer()
_lowerCamelCase : int =self.get_rust_tokenizer()
_lowerCamelCase : int ='I was born in 92000, and this is falsé.'
_lowerCamelCase : int =tokenizer.tokenize(lowercase_ )
_lowerCamelCase : List[Any] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Any =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_lowerCamelCase : str =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : str =self.get_rust_tokenizer()
_lowerCamelCase : Union[str, Any] =tokenizer.encode(lowercase_ )
_lowerCamelCase : List[Any] =rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : str =BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ )
_lowerCamelCase : int =tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_lowerCamelCase : Optional[Any] =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCamelCase : Any =tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowerCamelCase : Optional[int] =tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_lowerCamelCase : List[str] ='Hello World!'
_lowerCamelCase : Tuple =[65, 1_8536, 2260, 101, 66]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : int =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
_lowerCamelCase : Tuple =[65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_lowerCamelCase : Union[str, Any] =list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowerCamelCase : List[Any] =' '.join(lowercase_ )
_lowerCamelCase : List[str] =self.big_tokenizer.encode_plus(lowercase_ , return_tensors='pt' , return_token_type_ids=lowercase_ )
_lowerCamelCase : Optional[int] =self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase_ )
_lowerCamelCase : List[str] =BigBirdConfig(attention_type='original_full' )
_lowerCamelCase : Optional[Any] =BigBirdModel(lowercase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Dict =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
_lowerCamelCase : int =tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] ={'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ):
"""simple docstring"""
if not arr:
return 0
UpperCAmelCase_: Optional[Any] = 0 if allow_empty_subarrays else float("""-inf""" )
UpperCAmelCase_: str = 0.0
for num in arr:
UpperCAmelCase_: List[str] = max(0 if allow_empty_subarrays else num , curr_sum + num )
        UpperCAmelCase_: Union[str, Any] = max(max_sum , curr_sum )
return max_sum
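# e.g. max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6 (subarray [4, -1, 2, 1])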
if __name__ == "__main__":
from doctest import testmod
testmod()
a : List[str] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
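# Helper that builds a small random OpenLlama config and inputs for the shared
# model tests below.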
class _a :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, ) -> int:
UpperCAmelCase_: List[Any] = parent
UpperCAmelCase_: int = batch_size
UpperCAmelCase_: Any = seq_length
UpperCAmelCase_: Optional[int] = is_training
UpperCAmelCase_: Dict = use_input_mask
UpperCAmelCase_: Optional[int] = use_token_type_ids
UpperCAmelCase_: Dict = use_labels
UpperCAmelCase_: List[str] = vocab_size
UpperCAmelCase_: Union[str, Any] = hidden_size
UpperCAmelCase_: List[Any] = num_hidden_layers
UpperCAmelCase_: Tuple = num_attention_heads
UpperCAmelCase_: Optional[int] = intermediate_size
UpperCAmelCase_: Tuple = hidden_act
UpperCAmelCase_: Tuple = hidden_dropout_prob
UpperCAmelCase_: List[str] = attention_probs_dropout_prob
UpperCAmelCase_: Any = max_position_embeddings
UpperCAmelCase_: List[Any] = type_vocab_size
UpperCAmelCase_: List[str] = type_sequence_label_size
UpperCAmelCase_: Tuple = initializer_range
UpperCAmelCase_: Optional[int] = num_labels
UpperCAmelCase_: Union[str, Any] = num_choices
UpperCAmelCase_: Any = scope
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase_: str = None
if self.use_input_mask:
UpperCAmelCase_: Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_: int = None
if self.use_token_type_ids:
UpperCAmelCase_: int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase_: Dict = None
UpperCAmelCase_: List[str] = None
UpperCAmelCase_: Any = None
if self.use_labels:
UpperCAmelCase_: Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size], self.num_choices )
UpperCAmelCase_: List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case (self ) -> List[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, use_stable_embedding=SCREAMING_SNAKE_CASE_, )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCAmelCase_: List[Any] = OpenLlamaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: List[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Any = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> List[Any]:
UpperCAmelCase_: Any = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, ) -> Any:
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# first forward pass
UpperCAmelCase_: str = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, use_cache=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_: Tuple = ids_tensor((self.batch_size, 3), config.vocab_size )
UpperCAmelCase_: Optional[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_: str = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCAmelCase_: str = torch.cat([input_mask, next_mask], dim=-1 )
UpperCAmelCase_: Dict = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
UpperCAmelCase_: Tuple = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, past_key_values=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
# select random slice
UpperCAmelCase_: str = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCAmelCase_: str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1E-3 ) )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: List[str] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = config_and_inputs
UpperCAmelCase_: List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
A = (OpenLlamaForCausalLM,) if is_torch_available() else ()
A = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = False
def __snake_case (self ) -> int:
UpperCAmelCase_: str = OpenLlamaModelTester(self )
UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_: Dict = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
    def __snake_case (self ) -> str:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
    def __snake_case (self ) -> int:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
    def __snake_case (self ) -> Optional[int]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def __snake_case (self ) -> int:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def __snake_case (self, scaling_type ) -> Optional[int]:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 1_0.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5 ) )
| 82
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _a (property ):
'''simple docstring'''
    def __get__( self , obj , objtype=None ):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""" )
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def UpperCamelCase (lowercase_: Any ) -> List[Any]:
    val = lowercase_.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"""invalid truth value {val!r}""" )
def is_tensor (lowercase_ ):
    if is_torch_fx_proxy(lowercase_ ):
        return True
    if is_torch_available():
        import torch
        if isinstance(lowercase_ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(lowercase_ , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(lowercase_ , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(lowercase_ , np.ndarray )
def _is_numpy (lowercase_ ):
    return isinstance(lowercase_ , np.ndarray )
def is_numpy_array (lowercase_ ):
    return _is_numpy(lowercase_ )
def _is_torch (lowercase_ ):
    import torch
    return isinstance(lowercase_ , torch.Tensor )
def is_torch_tensor (lowercase_ ):
    return False if not is_torch_available() else _is_torch(lowercase_ )
def _is_torch_device (lowercase_ ):
    import torch
    return isinstance(lowercase_ , torch.device )
def is_torch_device (lowercase_ ):
    return False if not is_torch_available() else _is_torch_device(lowercase_ )
def _is_torch_dtype (lowercase_ ):
    import torch
    if isinstance(lowercase_ , str ):
        if hasattr(torch , lowercase_ ):
            lowercase_ = getattr(torch , lowercase_ )
        else:
            return False
    return isinstance(lowercase_ , torch.dtype )
def is_torch_dtype (lowercase_ ):
    return False if not is_torch_available() else _is_torch_dtype(lowercase_ )
def _is_tensorflow (lowercase_ ):
    import tensorflow as tf
    return isinstance(lowercase_ , tf.Tensor )
def is_tf_tensor (lowercase_ ):
    return False if not is_tf_available() else _is_tensorflow(lowercase_ )
def _is_tf_symbolic_tensor (lowercase_ ):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , """is_symbolic_tensor""" ):
        return tf.is_symbolic_tensor(lowercase_ )
    return type(lowercase_ ) == tf.Tensor
def is_tf_symbolic_tensor (lowercase_ ):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(lowercase_ )
def _is_jax (lowercase_ ):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(lowercase_ , jnp.ndarray )
def is_jax_tensor (lowercase_ ):
    return False if not is_flax_available() else _is_jax(lowercase_ )
def to_py_obj (obj ):
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy (obj ):
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
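# Hedged sketch of how the two converters above compose (tensor values are
# illustrative and assume torch is installed):
#   import torch
#   to_py_obj({"logits": torch.tensor([1.0, 2.0])}) -> {"logits": [1.0, 2.0]}
#   to_numpy([1, 2, 3])                             -> np.array([1, 2, 3])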
class _a (OrderedDict ):
'''simple docstring'''
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        return tuple(self[k] for k in self.keys() )
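# Hedged sketch of the dict/tuple duality implemented above (the dataclass
# name and field are hypothetical; `_a` is the obfuscated output base class):
#   @dataclass
#   class ExampleOutput(_a):
#       logits: Any = None
#   out = ExampleOutput(logits=value)
#   out.logits, out["logits"], and out.to_tuple()[0] all return `value`.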
class _a (str , Enum ):
'''simple docstring'''
    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class _a (_a ):  # the explicit-enum base defined above
    '''simple docstring'''
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class _a (_a ):  # the explicit-enum base defined above
    '''simple docstring'''
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class _a :
'''simple docstring'''
    def __init__( self , context_managers ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss (model_class ):
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels (model_class ):
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict (d: MutableMapping , parent_key: str = "" , delimiter: str = "." ):
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
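# A minimal, hedged example for `flatten_dict` (the nested dict is illustrative):
#   flatten_dict({"model": {"hidden_size": 768, "heads": {"num": 12}}})
#   -> {"model.hidden_size": 768, "model.heads.num": 12}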
@contextmanager
def working_or_temp_dir (working_dir , use_temp_dir: bool = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose (array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array )}.""" )
def reshape (array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array )}.""" )
def squeeze (array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims (array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size (array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"""Type not supported for tensor_size: {type(array )}.""" )
def add_model_info_to_auto_map (auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def infer_framework (model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 192
|
import argparse
import os
import re
A_ : List[str] = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent (line ):
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def UpperCamelCase (lowercase_: Dict , lowercase_: Any="" , lowercase_: Any=None , lowercase_: Any=None ) -> Tuple:
A__ : Optional[Any] = 0
A__ : str = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
A__ : Tuple = ["""\n""".join(lines[:index] )]
else:
A__ : Optional[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
A__ : Union[str, Any] = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
A__ : Union[str, Any] = [lines[index + 1]]
index += 1
else:
A__ : List[Any] = []
else:
blocks.append("""\n""".join(lowercase_ ) )
A__ : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append("""\n""".join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore (key ):
    def _inner(x ):
        return key(x ).lower().replace("""_""" , """""" )
    return _inner
def sort_objects (objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
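# Hedged example for `sort_objects` (names are illustrative): constants first,
# then classes, then functions, each group sorted ignoring underscores:
#   sort_objects(["load_bert", "BERT_CONSTANT", "BertModel"])
#   -> ["BERT_CONSTANT", "BertModel", "load_bert"]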
def sort_objects_in_import (import_statement ):
    # This inner function sort imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f"""[{imports}]"""
        keys = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("""\n""" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
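# Hedged example for `sort_objects_in_import` on a one-line statement
# (the import text is illustrative):
#   sort_objects_in_import('_import_structure["models.bert"] = ["B", "A"]')
#   -> '_import_structure["models.bert"] = ["A", "B"]'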
def sort_imports (file , check_only=True ):
    with open(file , """r""" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f"""Overwriting {file}.""" )
            with open(file , """w""" ) as f:
                f.write("""\n""".join(main_blocks ) )
def sort_imports_in_all_inits (check_only=True ):
    failures = []
    for root, _, files in os.walk(A_ ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , """__init__.py""" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , """__init__.py""" )]
    if len(failures ) > 0:
        raise ValueError(f"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 192
| 1
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = IFImgaImgSuperResolutionPipeline
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
UpperCAmelCase__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
UpperCAmelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
    def _a ( self , device , seed=0 ) -> Union[str, Any]:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self ) -> Tuple:
self._test_save_load_local()
def _a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 117
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase__ ( ProcessorMixin ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "MCTCTFeatureExtractor"
UpperCAmelCase__ : str = "AutoTokenizer"
    def __init__( self , feature_extractor , tokenizer ) -> Dict:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ) -> Optional[Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def _a ( self , *args , **kwargs ) -> str:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def _a ( self , *args , **kwargs ) -> List[Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('input_features' , None )
        labels = kwargs.pop('labels' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def _a ( self , *args , **kwargs ) -> Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def _a ( self ) -> str:
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 117
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class _snake_case ( PretrainedConfig ):
SCREAMING_SNAKE_CASE__ = 'roberta-prelayernorm'
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _snake_case ( OnnxConfig ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task == "multiple-choice":
a :str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a :Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 94
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_vision_model'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1e-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class BridgeTowerTextConfig( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_text_model'''
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> Union[str, Any]:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class BridgeTowerConfig( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = '''bridgetower'''
    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1e-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ) -> Tuple:
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("""text_config_dict""" , None )
        _ = kwargs.pop("""vision_config_dict""" , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )
    @classmethod
    def snake_case_( cls , text_config , vision_config , **kwargs ) -> int:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def snake_case_( self ) -> List[Any]:
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 58
| 0
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
A_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
UpperCamelCase_: List[str] = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , _lowerCamelCase , standard_warn=_lowerCamelCase )
UpperCamelCase_: Tuple = dict(scheduler.config )
UpperCamelCase_: Dict = 1
UpperCamelCase_: List[Any] = FrozenDict(_lowerCamelCase )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
UpperCamelCase_: Union[str, Any] = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , _lowerCamelCase , standard_warn=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = dict(scheduler.config )
UpperCamelCase_: Tuple = True
UpperCamelCase_: Optional[Any] = FrozenDict(_lowerCamelCase )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=_lowerCamelCase , segmentation_processor=_lowerCamelCase , vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , )
def _a ( self , _lowerCamelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_: Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def _a ( self ):
self.enable_attention_slicing(_lowerCamelCase )
def _a ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCamelCase_: Optional[Any] = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self ):
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_1_2 , _lowerCamelCase = 5_0 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
UpperCamelCase_: Optional[int] = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
UpperCamelCase_: Union[str, Any] = self.segmentation_model(**_lowerCamelCase )
UpperCamelCase_: int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase_: Dict = self.numpy_to_pil(_lowerCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase_: int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , )
| 292
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read (bpayload: bytes , sampling_rate: int ) -> np.array:
    ar = F'''{sampling_rate}'''
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
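# Hedged usage sketch (the file path is hypothetical; requires ffmpeg on PATH):
#   with open("sample.wav", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)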
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = "f32le" , ) -> Tuple:
UpperCamelCase_: Any = F'''{sampling_rate}'''
UpperCamelCase_: Union[str, Any] = '1'
if format_for_conversion == "s16le":
UpperCamelCase_: Optional[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCamelCase_: int = platform.system()
if system == "Linux":
UpperCamelCase_: Tuple = 'alsa'
UpperCamelCase_: List[str] = 'default'
elif system == "Darwin":
UpperCamelCase_: int = 'avfoundation'
UpperCamelCase_: Union[str, Any] = ':0'
elif system == "Windows":
UpperCamelCase_: Tuple = 'dshow'
UpperCamelCase_: Dict = 'default'
UpperCamelCase_: Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase_: Optional[int] = _ffmpeg_stream(UpperCAmelCase__ , UpperCAmelCase__ )
for item in iterator:
yield item
def ffmpeg_microphone_live (sampling_rate: int , chunk_length_s: float , stream_chunk_s = None , stride_length_s = None , format_for_conversion: str = "f32le" , ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 1_0 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter (iterator , chunk_len: int , stride , stream: bool = False ):
    acc = b''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
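# Hedged sketch of the chunking above (numbers are illustrative): with
# chunk_len=10 and stride=(2, 2), the accumulator advances by
# chunk_len - stride_left - stride_right = 6 bytes per chunk, so consecutive
# chunks overlap by 4 bytes:
#   for item in chunk_bytes_iter(iter([b"0123456789ABCDEF"]), 10, (2, 2)):
#       item["raw"], item["stride"]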
def _ffmpeg_stream (ffmpeg_command , buflen: int ):
    bufsize = 2**2_4  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 292
| 1
|
'''simple docstring'''
import numpy as np
import qiskit
def __a(key_len: int = 8 , seed: int | None = None ):
    """Simulate the BB84 quantum key distribution protocol and return the shared key."""
    _lowerCAmelCase = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = _lowerCAmelCase.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = _lowerCAmelCase.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = _lowerCAmelCase.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="BB84" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , "0" )
    return key
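# Sanity sketch: with a fixed seed the protocol is deterministic, e.g.
# bbaa(8, seed=0) returns the same 8-character bit string on every run,
# as exercised by the __main__ block below.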
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 158
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : str = {"vocab_file": "spiece.model"}
_lowerCAmelCase : List[str] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
_lowerCAmelCase : Optional[Any] = {
"google/bigbird-roberta-base": 40_96,
"google/bigbird-roberta-large": 40_96,
"google/bigbird-base-trivia-itc": 40_96,
}
class UpperCAmelCase_ ( PreTrainedTokenizer ):
__SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , A : Union[str, Any] , A : List[str]="<unk>" , A : List[Any]="<s>" , A : Dict="</s>" , A : Any="<pad>" , A : List[str]="[SEP]" , A : Optional[Any]="[MASK]" , A : List[str]="[CLS]" , A : Optional[Dict[str, Any]] = None , **A : Optional[int] , ):
_UpperCAmelCase : str = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
_UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
_UpperCAmelCase : List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
_UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
_UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
_UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , mask_token=A , cls_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_UpperCAmelCase : int = vocab_file
_UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def snake_case_ ( self : List[str] ):
return self.sp_model.get_piece_size()
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
_UpperCAmelCase : Optional[Any] = self.__dict__.copy()
_UpperCAmelCase : str = None
return state
def __setstate__( self : Optional[Any] , A : Any ):
_UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self : Union[str, Any] , A : str ):
return self.sp_model.encode(A , out_type=A )
def snake_case_ ( self : int , A : Optional[Any] ):
return self.sp_model.piece_to_id(A )
def snake_case_ ( self : str , A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = self.sp_model.IdToPiece(A )
return token
def snake_case_ ( self : Optional[Any] , A : Union[str, Any] ):
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[Any] = ""
_UpperCAmelCase : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = []
else:
current_sub_tokens.append(A )
_UpperCAmelCase : Dict = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def snake_case_ ( self : Union[str, Any] , A : List[int] , A : bool = False , A : bool = None , A : bool = True , **A : Dict , ):
_UpperCAmelCase : int = kwargs.pop("use_source_tokenizer" , A )
_UpperCAmelCase : Dict = self.convert_ids_to_tokens(A , skip_special_tokens=A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Tuple = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
_UpperCAmelCase : Union[str, Any] = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_UpperCAmelCase : List[Any] = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(A ) )
else:
_UpperCAmelCase : Optional[Any] = "".join(A )
_UpperCAmelCase : Optional[int] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_UpperCAmelCase : str = self.clean_up_tokenization(A )
return clean_text
else:
return text
def snake_case_ ( self : Dict , A : str , A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase : Tuple = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , "wb" ) as fi:
_UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def snake_case_ ( self : Union[str, Any] , A : List[int] , A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
_UpperCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def snake_case_ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
_UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
_UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
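# Usage sketch (added for illustration; not part of the original module). Assumes the
# `transformers` package is installed and uses the real checkpoint named in the map above:
#   from transformers import BigBirdTokenizer
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Paris is the [MASK] of France.")["input_ids"]
#   print(tok.decode(ids))  # round-trips through _tokenize / _decode defined above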
| 352
|
"""simple docstring"""
def solution(limit: int = 28_123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
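    # Illustrative check (added): 12 is the smallest abundant number, so the first
    # sum of two abundant numbers is 24 and solution(23) simply sums 1..23.
    assert solution(23) == sum(range(1, 24))  # == 276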
| 202
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: float) -> float:
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
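# Usage sketch (added for illustration; not part of the original module). It exercises
# the Polynomial API defined above; the sample coefficients are arbitrary.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1, coefficients in ascending order
    q = Polynomial(1, [1, 1])  # x + 1
    print(p + q)  # 3x^2 + 3x + 2
    print(p.evaluate(2))  # 1 + 2*2 + 3*4 = 17
    print(p.derivative())  # 6x + 2
    print(p.integral())  # 1.0x^3 + 1.0x^2 + 1.0x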
| 82
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
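# Usage sketch (added for illustration; not part of the original module). Requires bs4;
# the HTML snippet is made up. With the methods above it yields:
#   extractor = MarkupLMFeatureExtractor()
#   encoding = extractor("<html><body><h1>Title</h1><p>Hello world</p></body></html>")
#   encoding["nodes"]   # [['Title', 'Hello world']]
#   encoding["xpaths"]  # [['/html/body/h1', '/html/body/p']]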
| 82
| 1
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False)
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False)
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index)
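# Hedged setup sketch (added; not part of the original module): the retrieval workers
# handed to `from_pretrained` are `RayRetriever` instances wrapped as Ray actors. The
# checkpoint name and worker count below are illustrative.
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()  # initializes the index on every worker via ray.get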
| 108
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find root of log(x) - 1 = 0 (the root is e)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
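    # Illustrative addition: precision is tunable; a looser tolerance trades accuracy for speed.
    print(F'''A coarse root of cos(x) = 0 is {newton_raphson("cos(x)", 1, precision=10**-4)}''')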
| 108
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n### Information retriever options\n\nThe **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\ntrained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\nThe answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n"
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n### Answer generation options\n\nThe sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\nweights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n**beam** search, or **sample** from the decoder's output probabilities.\n"
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
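# How to run (added note): this file is a Streamlit script. Assuming the original
# example layout (eli5_utils.py and the memmap/index files next to it), launch with:
#   streamlit run eli5_app.py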
| 117
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
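# Illustrative generation sketch (added; mirrors the slow integration test above, but
# with the smaller distilled checkpoint). The `.sequences` access assumes the usual
# Flax generate output; treat this as a sketch rather than part of the test suite.
#   from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#   model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
#   inputs = tokenizer(["Hello, how are you?"], return_tensors="jax")
#   out = model.generate(**inputs, num_beams=2, max_length=20)
#   print(tokenizer.batch_decode(out.sequences, skip_special_tokens=True))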
| 117
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
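# Usage note (added): with the lazy structure above, a symbol's submodule is only
# imported on first attribute access, e.g.
#   from transformers.models.blenderbot import BlenderbotConfig  # loads configuration_blenderbot only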
| 105
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_output_embeds_base_model( self):
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
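# For reference, the integration check above can be reproduced standalone; the
# checkpoint name and input ids are taken from the test itself, the rest is
# ordinary TF usage:
# model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
# input_ids = tf.convert_to_tensor([[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]], dtype=tf.int32)
# print(model(input_ids)[0].shape)  # TensorShape([1, 8, 512])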
| 105
| 1
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests( unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
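# A minimal sketch of the rule the tests above pin down. This is an illustrative
# assumption about the behavior, not the actual diffusers implementation: every
# PyTorch ".bin" weight needs a ".safetensors" counterpart with the same stem
# ("pytorch_model" maps to "model"), and a passed variant falls back to the
# non-variant name when no variant file exists.
def sketch_is_safetensors_compatible(filenames, variant=None):
    sf_files = {f for f in filenames if f.endswith(".safetensors")}
    for pt_file in (f for f in filenames if f.endswith(".bin")):
        folder, _, name = pt_file.rpartition("/")
        stem = name[: -len(".bin")]
        if stem.startswith("pytorch_model"):  # transformers-style weight name
            stem = "model" + stem[len("pytorch_model") :]
        prefix = folder + "/" if folder else ""
        candidates = {f"{prefix}{stem}.safetensors"}
        if variant is not None:  # e.g. ".fp16" files may fall back to the non-variant name
            candidates.add(f"{prefix}{stem}".replace(f".{variant}", "") + ".safetensors")
        if not candidates & sf_files:
            return False
    return True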
| 292
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encodec'''] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
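# To illustrate the mechanism the lazy module relies on, a sketch using PEP 562
# module-level __getattr__ (an assumption for illustration; the real _LazyModule
# is more general and also handles submodule access, __dir__, and caching):
# def __getattr__(name):
#     for submodule, exported in _import_structure.items():
#         if name in exported:
#             module = __import__(f"{__name__}.{submodule}", fromlist=[name])
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")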
| 292
| 1
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    '''simple docstring'''
    @property
    def dummy_input(self ):
        '''simple docstring'''
        return self.get_dummy_input()
    @property
    def output_shape(self ):
        '''simple docstring'''
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
    def get_dummy_input(self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device)
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self ):
        '''simple docstring'''
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self , expected_slice ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output , Tuple):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3)
    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def test_training(self ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output , Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape , device=device)
        loss = torch.nn.functional.mse_loss(output , noise)
        loss.backward()
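# The mixin above expects a concrete subclass to supply `block_class` and
# `block_type`; a hypothetical wiring (class and block names are illustrative
# assumptions, and the expected values are placeholders):
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D  # e.g. from diffusers.models.unet_2d_blocks
#     block_type = "down"
#     def test_output(self):
#         expected_slice = [...]  # nine reference values for the 3x3 output slice
#         super().test_output(expected_slice)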
| 362
|
from math import pow
def backtrack( needed_sum , power , current_number , current_sum , solutions_count , ) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve( needed_sum , power ) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10." )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
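# Worked example: with power = 2, the only way to write 13 as a sum of squares
# of distinct natural numbers is 2**2 + 3**2, so exactly one solution is counted.
# print(solve(13, 2))   # 1
# print(solve(100, 2))  # 3  (10**2; 6**2 + 8**2; 1**2 + 3**2 + 4**2 + 5**2 + 7**2)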
| 273
| 0
|
def and_gate( input_1 , input_2 ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate( ) -> None:
    """simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
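# As a quick illustration of composing gates, a NAND gate is just the negated
# AND (this helper is an addition for illustration, not part of the original file):
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))
# print(nand_gate(1, 1))  # 0
# print(nand_gate(1, 0))  # 1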
| 307
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_A : int = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester( unittest.TestCase, ToolTesterMixin ):
    def setUp( self ):
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )
    def test_exact_match_arg( self ):
        result = self.tool(_A , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_arg_remote( self ):
        result = self.remote_tool(_A , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg( self ):
        result = self.tool(text=_A , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg_remote( self ):
        result = self.remote_tool(text=_A , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 202
| 0
|
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
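# A hedged usage sketch: inside `datasets`, a builder's `_split_generators`
# receives one of these managers and typically calls `download_and_extract`;
# the URL below is a placeholder assumption.
# dl_manager = DownloadManager()
# data_dir = dl_manager.download_and_extract("https://example.com/corpus.zip")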
| 28
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [4, 8, 16, 32],
            """num_groups""": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head( self ):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ):
        image_processor = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
        model = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
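# Minimal end-to-end sketch mirroring the integration tests above (the
# checkpoint name is taken from the tests; the image path is a placeholder):
# processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
# model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])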
| 28
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_024,
'''facebook/bart-large''': 1_024,
'''facebook/bart-large-mnli''': 1_024,
'''facebook/bart-large-cnn''': 1_024,
'''facebook/bart-large-xsum''': 1_024,
'''yjernite/bart_eli5''': 1_024,
}
@lru_cache()
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BartTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
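# To see the byte-level trick in isolation: bytes_to_unicode() maps every byte
# 0..255 to a printable unicode character, so a space (byte 32) round-trips
# through the reversible alias "Ġ" instead of being a literal space that BPE
# would have to treat specially.
# byte_encoder = bytes_to_unicode()
# print(byte_encoder[ord(" ")])  # 'Ġ'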
| 108
|
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
def shift_tokens_right( input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel( FlaxTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
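# Worked example for shift_tokens_right above: the decoder input is the target
# shifted one position right, starting from the decoder start token, with any
# -100 label padding replaced by pad_token_id.
# shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=0)
# -> Array([[0, 5, 6]])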
| 108
| 1
|
from numpy import exp, pi, sqrt
def gaussian( x , mu = 0.0 , sigma = 1.0 ) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
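# Worked example: the density peaks at x = mu with value 1 / sqrt(2 * pi * sigma**2),
# so with the default mu = 0.0 and sigma = 1.0:
# print(gaussian(0))  # ~0.3989422804014327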
| 279
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    '''7B''': 11008,
    '''13B''': 13824,
    '''30B''': 17920,
    '''65B''': 22016,
    '''70B''': 28672,
}
NUM_SHARDS = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def compute_intermediate_size( n , ffn_dim_multiplier=1 , multiple_of=256 ):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json( path ):
    """simple docstring"""
    with open(path , '''r''' ) as f:
        return json.load(f )
def write_json( text , path ):
    """simple docstring"""
    with open(path , '''w''' ) as f:
        json.dump(text , f )
def write_model( model_path , input_base_path , model_size , safe_serialization=True ):
    """simple docstring"""
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , '''tmp''' )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , '''params.json''' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
    print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path , '''consolidated.00.pth''' ) , map_location='''cpu''' )
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path , f'''consolidated.{i:02d}.pth''' ) , map_location='''cpu''' )
            for i in range(num_shards )
        ]
    param_count = 0
    index_dict = {'''weight_map''': {}}
    for layer_i in range(n_layers ):
        filename = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
                f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
                f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
                f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
                f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
                f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[f'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards )] , dim=1 )
            state_dict[f'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards )] , dim=0 )
            state_dict[f'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards )] , dim=1 )
            state_dict[f'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards )] , dim=0 )
        state_dict[f'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict['''weight_map'''][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    filename = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        state_dict = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        state_dict = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(num_shards )] , dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(num_shards )] , dim=0 ),
        }
    for k, v in state_dict.items():
        index_dict['''weight_map'''][k] = filename
        param_count += v.numel()
    torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    # Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict , os.path.join(tmp_model_path , '''pytorch_model.bin.index.json''' ) )
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 256
    config = LlamaConfig(
        hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=num_key_value_heads , )
    config.save_pretrained(tmp_model_path )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''' )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer( tokenizer_path , input_tokenizer_path ):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
    parser.add_argument(
        '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
    parser.add_argument(
        '''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
    parser.add_argument('''--safe_serialization''' , type=bool , help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , '''tokenizer.model''' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
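# A standalone copy of the rotary `permute` nested in write_model, for
# experimentation (the name `permute_for_rope` is illustrative):
# def permute_for_rope(w, n_heads, dim1, dim2):
#     return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
# With w = torch.arange(32.).reshape(8, 4) and n_heads=2, rows (0, 1, 2, 3) of
# each head come out as (0, 2, 1, 3): the two rotary half-blocks are interleaved.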
| 279
| 1
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_decoder( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXModel(config=config )
        model.to(torch_device )
        model.eval()
        _ = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
lowerCamelCase : List[Any] =(
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : str =(GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase : List[Any] =(
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[str] =False
lowerCamelCase : Optional[Any] =False
lowerCamelCase : int =False
lowerCamelCase : List[Any] =False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
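# --- Added illustration: what the two rope_scaling modes in the test above do. ---
# A minimal, self-contained sketch (not the transformers API): "linear" scaling
# divides every position index by the factor, stretching the trained context;
# "dynamic" (NTK) scaling leaves short inputs untouched and only rescales the
# rotary base once the input exceeds the original maximum length, which is why
# the short-input outputs match only in the dynamic case.
import torch

def linear_scaled_positions(seq_len: int, factor: float) -> torch.Tensor:
    # Illustrative helper (hypothetical name): the position ids used to build RoPE angles.
    return torch.arange(seq_len, dtype=torch.float32) / factor

assert torch.equal(linear_scaled_positions(4, 2.0), torch.tensor([0.0, 0.5, 1.0, 1.5]))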
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 105
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: TransformeraDModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
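# --- Added illustration: the classifier-free guidance combination used in the loop above. ---
# Minimal sketch with dummy tensors; the shapes are assumptions, not values from the pipeline.
import torch

cond_eps = torch.randn(2, 4, 8, 8)    # noise prediction for the real class labels
uncond_eps = torch.randn(2, 4, 8, 8)  # noise prediction for the "null" class (id 1000)
guidance_scale = 4.0

# Move the prediction away from the unconditional estimate, toward the conditional one.
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
assert half_eps.shape == cond_eps.shape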
| 105
| 1
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 364
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device)
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                'Or install FastAPI and uvicorn separately.')
        else:
            logger.info(f"""Serving model over {host}:{port}""")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                    APIRoute(
                        '/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute(
                        '/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute(
                        '/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
                ], timeout=600)

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True)):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
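# --- Added usage sketch: exercising the /tokenize route once the server is running. ---
# Assumes the CLI defaults above (localhost:8888); the JSON field names mirror the
# Body(..., embed=True) parameters of ServeCommand.tokenize.
import requests

response = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "My favorite food is", "return_ids": True},
)
print(response.json())  # e.g. {"tokens": [...], "tokens_ids": [...]}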
| 207
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ (unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
debug_launcher(self.test_metrics.main ,num_processes=1 )
@require_cpu
def lowerCamelCase__( self :List[Any] ) -> int:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def lowerCamelCase__( self :Any ) -> Dict:
self.test_metrics.main()
@require_multi_gpu
def lowerCamelCase__( self :Tuple ) -> str:
print(F'Found {torch.cuda.device_count()} devices.' )
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 240
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k is the Harris free parameter, usually 0.04 or 0.06."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the configured free parameter rather than a hardcoded constant
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
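# --- Added worked example: the Harris response R = det(M) - k * trace(M)^2 for one window. ---
# The structure-tensor sums below are made-up numbers, chosen only to show the arithmetic.
wxx, wyy, wxy = 4.0, 3.0, 1.0
k = 0.04
det = wxx * wyy - wxy**2    # 11.0
trace = wxx + wyy           # 7.0
r = det - k * trace**2      # 11.0 - 0.04 * 49.0 = 9.04 -> a strong corner response
print(r)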
| 273
| 0
|
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 207
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`): the model output.
    """

    sample: torch.FloatTensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] , lowerCAmelCase_ : int = 6_5_5_3_6 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : str = "fourier" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Tuple[int] = (3_2, 3_2, 6_4) , lowerCAmelCase_ : str = None , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = False , ) -> Optional[int]:
super().__init__()
__lowerCAmelCase = sample_size
# time
if time_embedding_type == "fourier":
__lowerCAmelCase = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_ )
__lowerCAmelCase = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__lowerCAmelCase = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_ )
__lowerCAmelCase = block_out_channels[0]
if use_timestep_embedding:
__lowerCAmelCase = block_out_channels[0] * 4
__lowerCAmelCase = TimestepEmbedding(
in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , )
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = None
# down
__lowerCAmelCase = in_channels
for i, down_block_type in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__lowerCAmelCase = i == len(lowerCAmelCase_ ) - 1
__lowerCAmelCase = get_down_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
__lowerCAmelCase = get_mid_block(
lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , )
# up
__lowerCAmelCase = list(reversed(lowerCAmelCase_ ) )
__lowerCAmelCase = reversed_block_out_channels[0]
if out_block_type is None:
__lowerCAmelCase = out_channels
else:
__lowerCAmelCase = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_ ) - 1 else final_upsample_channels
)
__lowerCAmelCase = i == len(lowerCAmelCase_ ) - 1
__lowerCAmelCase = get_up_block(
lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase_ )
__lowerCAmelCase = output_channel
# out
__lowerCAmelCase = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
__lowerCAmelCase = get_out_block(
out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNetaDOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample)
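# --- Added illustration: broadcasting a scalar timestep to the batch, as in forward() above. ---
# Dummy shapes (assumed): (batch, channels, length) for a 1-D UNet input.
import torch

sample = torch.randn(4, 2, 16)
timestep = torch.tensor(999)                   # 0-dim tensor, as a scheduler might pass it
timesteps = timestep[None]                     # lift to shape (1,)
timesteps = timesteps.expand(sample.shape[0])  # broadcast to the batch dimension
assert timesteps.shape == (4,)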
| 207
| 1
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCamelCase__ ):
UpperCamelCase = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCamelCase__ ):
UpperCamelCase = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def A ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCamelCase = FlaxBertModel.from_pretrained(UpperCamelCase__ )
UpperCamelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase__ : str ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCamelCase = FlaxRobertaModel.from_pretrained(UpperCamelCase__ )
UpperCamelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase__ : str ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
def A ( self : Union[str, Any] ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            _ = FlaxAutoModel.from_pretrained('bert-base' )
def A ( self : List[Any] ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def A ( self : Union[str, Any] ):
"""simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def A ( self : str ):
"""simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 28
|
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the generated perimeters that do not exceed max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_perceiver'''] = ['''PerceiverFeatureExtractor''']
    _import_structure['''image_processing_perceiver'''] = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_perceiver'''] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
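# --- Added illustration: the lazy-import pattern above in miniature. ---
# A simplified stand-in for _LazyModule (assumed behavior, not the transformers class):
# attribute access triggers the submodule import instead of paying for it at package import.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

# Usage sketch: MiniLazyModule("pkg", {"pkg.tokenization": ["Tokenizer"]}).Tokenizer
# imports pkg.tokenization only when .Tokenizer is first touched.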
| 363
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=16 , lowercase_=16 , ):
_snake_case : Dict = parent
_snake_case : List[str] = batch_size
_snake_case : Optional[Any] = seq_length
_snake_case : Dict = is_training
_snake_case : List[Any] = use_labels
_snake_case : Dict = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Any = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Tuple = max_position_embeddings
_snake_case : List[Any] = eos_token_id
_snake_case : Optional[int] = pad_token_id
_snake_case : Dict = bos_token_id
_snake_case : List[Any] = embed_dim
_snake_case : Optional[int] = word_embed_proj_dim
_snake_case : Union[str, Any] = False
def UpperCamelCase ( self ):
_snake_case : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : int = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
_snake_case : Any = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Any = TFOPTModel(config=lowercase_ )
_snake_case : int = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : Any = inputs_dict["attention_mask"][:1, :]
_snake_case : List[str] = 1
# first forward pass
_snake_case : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : int = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_lowerCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
_lowerCamelCase = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 10
def UpperCamelCase ( self ):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_snake_case : Dict = model_class(config=lowercase_ )
_snake_case : Union[str, Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
_snake_case : Optional[Any] = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
_snake_case : int = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
_snake_case : Tuple = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_snake_case : Any = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
_snake_case : Dict = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_snake_case : Optional[int] = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
_snake_case : Optional[int] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_snake_case : str = False
self.assertTrue(lowercase_ )
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class lowercase_ ( unittest.TestCase ):
_lowerCamelCase = 99
def UpperCamelCase ( self ):
_snake_case : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_snake_case : Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_snake_case : List[Any] = input_ids.shape[0]
_snake_case : List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowercase_ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFOPTModel.from_pretrained("facebook/opt-350m" )
_snake_case : Optional[Any] = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_snake_case : Optional[int] = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
_snake_case : List[Any] = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
_snake_case : List[str] = (1, 11, 512)
self.assertEqual(output.shape , lowercase_ )
_snake_case : Optional[int] = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
_snake_case : List[Any] = tf.function(lowercase_ , jit_compile=lowercase_ )
_snake_case : Tuple = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Optional[int] = "facebook/opt-350m"
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
_snake_case : Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model )
_snake_case : List[str] = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_snake_case : List[Any] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ , add_special_tokens=lowercase_ )
_snake_case : List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_snake_case : Tuple = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
_snake_case : List[Any] = tf.function(lowercase_ , jit_compile=lowercase_ )
_snake_case : Tuple = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
@property
def UpperCamelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase ( self ):
_snake_case : List[Any] = "facebook/opt-125m"
_snake_case : int = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_snake_case : str = []
_snake_case : Any = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
_snake_case : str = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
_snake_case : Any = model.generate(lowercase_ , max_length=10 )
_snake_case : List[str] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = "facebook/opt-350m"
_snake_case : Dict = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : Dict = TFOPTForCausalLM.from_pretrained(lowercase_ )
        tokenizer.padding_side = "left"
# use different length sentences to test batching
_snake_case : Union[str, Any] = [
"Hello, my dog is a little",
"Today, I",
]
_snake_case : Optional[Any] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ )
_snake_case : List[Any] = inputs["input_ids"]
_snake_case : Union[str, Any] = model.generate(input_ids=lowercase_ , attention_mask=inputs["attention_mask"] )
_snake_case : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_snake_case : List[str] = model.generate(input_ids=lowercase_ )
_snake_case : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_snake_case : int = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_snake_case : int = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
_snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
_snake_case : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
_snake_case : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
_snake_case : Optional[Any] = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def UpperCamelCase ( self ):
_snake_case : Tuple = "facebook/opt-350m"
_snake_case : Optional[int] = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_snake_case : str = []
_snake_case : str = GPTaTokenizer.from_pretrained(lowercase_ )
_snake_case : List[str] = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
_snake_case : Dict = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
_snake_case : Any = model.generate(lowercase_ , max_length=10 )
_snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
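# --- Added note: why the batched-generation test above sets tokenizer.padding_side = "left". ---
# Sketch with made-up ids: with left padding the prompts end flush against the position
# where new tokens are appended, so a decoder-only model can continue each row directly.
import tensorflow as tf

pad_id = 1
input_ids = tf.constant([[pad_id, pad_id, 5, 6], [7, 8, 9, 10]])  # two left-padded prompts
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_id), tf.int32)
print(attention_mask.numpy())  # [[0 0 1 1]
                               #  [1 1 1 1]]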
| 284
| 0
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase_ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : int = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 279
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError('''Parameter number must be int''')
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')
    # Converts number to string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count starting numbers below ``number_limit`` whose chain has exactly ``chain_length`` elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
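# Added worked example: the classic factorial-digit loop from the problem statement.
# 169 -> 363601 -> 1454 -> 169 is a chain of length 3.
assert digit_factorial_sum(169) == 363_601   # 1! + 6! + 9!
assert digit_factorial_sum(363_601) == 1_454
assert digit_factorial_sum(1_454) == 169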
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
| 279
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = KandinskyImgaImgPipeline
lowercase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
lowercase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
lowercase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase = False
@property
def __lowercase ( self : List[Any] ) -> str:
return 32
@property
def __lowercase ( self : Dict ) -> Optional[Any]:
return 32
@property
def __lowercase ( self : int ) -> Optional[int]:
return self.time_input_dim
@property
def __lowercase ( self : Optional[int] ) -> Any:
return self.time_input_dim * 4
@property
def __lowercase ( self : List[str] ) -> Dict:
return 1_00
@property
def __lowercase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase_ : Any = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
lowerCAmelCase_ : str = MultilingualCLIP(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self : List[Any] ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase_ : List[Any] = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def __lowercase ( self : int ) -> Union[str, Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : List[Any] = self.dummy_text_encoder
lowerCAmelCase_ : List[str] = self.dummy_tokenizer
lowerCAmelCase_ : Union[str, Any] = self.dummy_unet
lowerCAmelCase_ : Union[str, Any] = self.dummy_movq
lowerCAmelCase_ : Any = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowerCAmelCase_ : List[Any] = DDIMScheduler(**lowerCamelCase )
lowerCAmelCase_ : List[str] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __lowercase ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : int=0 ) -> Any:
lowerCAmelCase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase )
# create init_image
lowerCAmelCase_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(lowerCamelCase ).startswith("""mps""" ):
lowerCAmelCase_ : Any = torch.manual_seed(lowerCamelCase )
else:
lowerCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCAmelCase_ : str = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __lowercase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = """cpu"""
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : Optional[Any] = self.pipeline_class(**lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCAmelCase_ : int = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
lowerCAmelCase_ : Tuple = output.images
lowerCAmelCase_ : int = pipe(
**self.get_dummy_inputs(lowerCamelCase ) , return_dict=lowerCamelCase , )[0]
lowerCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Tuple = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Dict ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)  # torch_device comes from the testing utils imported above
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
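
# For reference, a minimal sketch of the two-stage flow the test above exercises
# (model ids as in the test; assumes a CUDA device and a PIL `source_image`):
#
#     prior = KandinskyPriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
#     pipe = KandinskyImg2ImgPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
#     ).to("cuda")
#     frog = pipe(
#         "A red cartoon frog, 4k",
#         image=source_image,
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_image_embeds,
#         strength=0.2,
#     ).images[0]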
"""Bash-script regression tests for the seq2seq finetuning and distillation examples."""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
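
# Student checkpoint substituted for facebook/mbart-large-cc25 when the bash
# script is rewritten below.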
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the slow training test below does not include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # any longer and we suspect the model is hanging on generate, e.g. because a bad config was saved
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU should improve during training
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1